You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by zh...@apache.org on 2018/02/10 12:07:31 UTC
[01/11] hbase git commit: HBASE-19966 The WriteEntry for WALKey may be
null if we failed to call WAL.append [Forced Update!]
Repository: hbase
Updated Branches:
refs/heads/HBASE-19064 3f74318c6 -> 03057e197 (forced update)
HBASE-19966 The WriteEntry for WALKey may be null if we failed to call WAL.append
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/32f235ab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/32f235ab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/32f235ab
Branch: refs/heads/HBASE-19064
Commit: 32f235abee5bd04aa2c788cca822e4d514697dac
Parents: 2b63af3
Author: zhangduo <zh...@apache.org>
Authored: Fri Feb 9 20:51:55 2018 +0800
Committer: Michael Stack <st...@apache.org>
Committed: Fri Feb 9 19:09:40 2018 -0800
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/wal/WALUtil.java | 4 +-
.../org/apache/hadoop/hbase/wal/WALKeyImpl.java | 25 +++---
.../regionserver/wal/AbstractTestFSWAL.java | 88 +++++++++++++-------
.../hbase/regionserver/wal/TestFSHLog.java | 30 +++----
4 files changed, 87 insertions(+), 60 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/32f235ab/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
index f226c80..19b2ab1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
@@ -156,7 +156,9 @@ public class WALUtil {
// Call complete only here because these are markers only. They are not for clients to read.
mvcc.complete(walKey.getWriteEntry());
} catch (IOException ioe) {
- mvcc.complete(walKey.getWriteEntry());
+ if (walKey.getWriteEntry() != null) {
+ mvcc.complete(walKey.getWriteEntry());
+ }
throw ioe;
}
return walKey;
http://git-wip-us.apache.org/repos/asf/hbase/blob/32f235ab/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
index 7abd4a5..c1a77ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -18,35 +17,32 @@
*/
package org.apache.hadoop.hbase.wal;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.SequenceId;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
import java.io.IOException;
-import java.io.InterruptedIOException;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.UUID;
-
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-// imports for things that haven't moved from regionserver.wal yet.
+import org.apache.hadoop.hbase.regionserver.SequenceId;
import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FamilyScope;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.ScopeType;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* Default implementation of Key for an Entry in the WAL.
@@ -78,8 +74,7 @@ public class WALKeyImpl implements WALKey {
* @return A WriteEntry gotten from local WAL subsystem.
* @see #setWriteEntry(MultiVersionConcurrencyControl.WriteEntry)
*/
- public MultiVersionConcurrencyControl.WriteEntry getWriteEntry() throws InterruptedIOException {
- assert this.writeEntry != null;
+ public MultiVersionConcurrencyControl.WriteEntry getWriteEntry() {
return this.writeEntry;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/32f235ab/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
index 009cca0..1cdb6e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
@@ -17,11 +17,13 @@
*/
package org.apache.hadoop.hbase.regionserver.wal;
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.*;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.hamcrest.CoreMatchers.*;
import java.io.IOException;
import java.util.ArrayList;
@@ -32,7 +34,6 @@ import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -41,16 +42,17 @@ import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -153,7 +155,7 @@ public abstract class AbstractTestFSWAL {
}
}
- protected void addEdits(WAL log, RegionInfo hri, HTableDescriptor htd, int times,
+ protected void addEdits(WAL log, RegionInfo hri, TableDescriptor htd, int times,
MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes)
throws IOException {
final byte[] row = Bytes.toBytes("row");
@@ -249,26 +251,20 @@ public abstract class AbstractTestFSWAL {
conf1.setInt("hbase.regionserver.maxlogs", 1);
AbstractFSWAL<?> wal = newWAL(FS, CommonFSUtils.getWALRootDir(conf1), DIR.toString(),
HConstants.HREGION_OLDLOGDIR_NAME, conf1, null, true, null, null);
- HTableDescriptor t1 =
- new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
- HTableDescriptor t2 =
- new HTableDescriptor(TableName.valueOf("t2")).addFamily(new HColumnDescriptor("row"));
- RegionInfo hri1 = RegionInfoBuilder.newBuilder(t1.getTableName())
- .setStartKey(HConstants.EMPTY_START_ROW)
- .setEndKey(HConstants.EMPTY_END_ROW)
- .build();
- RegionInfo hri2 = RegionInfoBuilder.newBuilder(t2.getTableName())
- .setStartKey(HConstants.EMPTY_START_ROW)
- .setEndKey(HConstants.EMPTY_END_ROW)
- .build();
+ TableDescriptor t1 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
+ TableDescriptor t2 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t2"))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
+ RegionInfo hri1 = RegionInfoBuilder.newBuilder(t1.getTableName()).build();
+ RegionInfo hri2 = RegionInfoBuilder.newBuilder(t2.getTableName()).build();
// add edits and roll the wal
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
NavigableMap<byte[], Integer> scopes1 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
- for (byte[] fam : t1.getFamiliesKeys()) {
+ for (byte[] fam : t1.getColumnFamilyNames()) {
scopes1.put(fam, 0);
}
NavigableMap<byte[], Integer> scopes2 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
- for (byte[] fam : t2.getFamiliesKeys()) {
+ for (byte[] fam : t2.getColumnFamilyNames()) {
scopes2.put(fam, 0);
}
try {
@@ -293,12 +289,12 @@ public abstract class AbstractTestFSWAL {
assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
// flush region 1, and roll the wal file. Only last wal which has entries for region1 should
// remain.
- flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
+ flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
wal.rollWriter();
// only one wal should remain now (that is for the second region).
assertEquals(1, wal.getNumRolledLogFiles());
// flush the second region
- flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getFamiliesKeys());
+ flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getColumnFamilyNames());
wal.rollWriter(true);
// no wal should remain now.
assertEquals(0, wal.getNumRolledLogFiles());
@@ -315,14 +311,14 @@ public abstract class AbstractTestFSWAL {
regionsToFlush = wal.findRegionsToForceFlush();
assertEquals(2, regionsToFlush.length);
// flush both regions
- flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
- flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getFamiliesKeys());
+ flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
+ flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getColumnFamilyNames());
wal.rollWriter(true);
assertEquals(0, wal.getNumRolledLogFiles());
// Add an edit to region1, and roll the wal.
addEdits(wal, hri1, t1, 2, mvcc, scopes1);
// tests partial flush: roll on a partial flush, and ensure that wal is not archived.
- wal.startCacheFlush(hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
+ wal.startCacheFlush(hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
wal.rollWriter();
wal.completeCacheFlush(hri1.getEncodedNameAsBytes());
assertEquals(1, wal.getNumRolledLogFiles());
@@ -364,15 +360,15 @@ public abstract class AbstractTestFSWAL {
final TableName tableName = TableName.valueOf(testName);
final RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
final byte[] rowName = tableName.getName();
- final HTableDescriptor htd = new HTableDescriptor(tableName);
- htd.addFamily(new HColumnDescriptor("f"));
+ final TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build();
HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDefaultRootDirPath(),
TEST_UTIL.getConfiguration(), htd);
HBaseTestingUtility.closeRegionAndWAL(r);
final int countPerFamily = 10;
final AtomicBoolean goslow = new AtomicBoolean(false);
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
- for (byte[] fam : htd.getFamiliesKeys()) {
+ for (byte[] fam : htd.getColumnFamilyNames()) {
scopes.put(fam, 0);
}
// subclass and doctor a method.
@@ -392,15 +388,15 @@ public abstract class AbstractTestFSWAL {
EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
try {
List<Put> puts = null;
- for (HColumnDescriptor hcd : htd.getFamilies()) {
+ for (byte[] fam : htd.getColumnFamilyNames()) {
puts =
- TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
+ TestWALReplay.addRegionEdits(rowName, fam, countPerFamily, ee, region, "x");
}
// Now assert edits made it in.
final Get g = new Get(rowName);
Result result = region.get(g);
- assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
+ assertEquals(countPerFamily * htd.getColumnFamilyNames().size(), result.size());
// Construct a WALEdit and add it a few times to the WAL.
WALEdit edits = new WALEdit();
@@ -445,4 +441,36 @@ public abstract class AbstractTestFSWAL {
wal.close();
}
}
+
+ @Test
+ public void testWriteEntryCanBeNull() throws IOException {
+ String testName = currentTest.getMethodName();
+ AbstractFSWAL<?> wal = newWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), testName,
+ CONF, null, true, null, null);
+ wal.close();
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("table"))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
+ RegionInfo ri = RegionInfoBuilder.newBuilder(td.getTableName()).build();
+ MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
+ NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+ for (byte[] fam : td.getColumnFamilyNames()) {
+ scopes.put(fam, 0);
+ }
+ long timestamp = System.currentTimeMillis();
+ byte[] row = Bytes.toBytes("row");
+ WALEdit cols = new WALEdit();
+ cols.add(new KeyValue(row, row, row, timestamp, row));
+ WALKeyImpl key =
+ new WALKeyImpl(ri.getEncodedNameAsBytes(), td.getTableName(), SequenceId.NO_SEQUENCE_ID,
+ timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc, scopes);
+ try {
+ wal.append(ri, key, cols, true);
+ fail("Should fail since the wal has already been closed");
+ } catch (IOException e) {
+ // expected
+ assertThat(e.getMessage(), containsString("log is closed"));
+ // the WriteEntry should be null since we fail before setting it.
+ assertNull(key.getWriteEntry());
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/32f235ab/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
index 9c789ba..93ea2b8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
@@ -32,12 +32,14 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
@@ -97,7 +99,7 @@ public class TestFSHLog extends AbstractTestFSWAL {
SecurityException, IllegalArgumentException, IllegalAccessException {
final String name = this.name.getMethodName();
FSHLog log = new FSHLog(FS, FSUtils.getRootDir(CONF), name, HConstants.HREGION_OLDLOGDIR_NAME,
- CONF, null, true, null, null);
+ CONF, null, true, null, null);
try {
Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler");
ringBufferEventHandlerField.setAccessible(true);
@@ -107,14 +109,14 @@ public class TestFSHLog extends AbstractTestFSWAL {
FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex");
syncRunnerIndexField.setAccessible(true);
syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1);
- HTableDescriptor htd =
- new HTableDescriptor(TableName.valueOf(this.name.getMethodName())).addFamily(new HColumnDescriptor("row"));
+ TableDescriptor htd =
+ TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName()))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
- for (byte[] fam : htd.getFamiliesKeys()) {
+ for (byte[] fam : htd.getColumnFamilyNames()) {
scopes.put(fam, 0);
}
- HRegionInfo hri =
- new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
+ RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
for (int i = 0; i < 10; i++) {
addEdits(log, hri, htd, 1, mvcc, scopes);
@@ -127,7 +129,7 @@ public class TestFSHLog extends AbstractTestFSWAL {
/**
* Test case for https://issues.apache.org/jira/browse/HBASE-16721
*/
- @Test (timeout = 30000)
+ @Test
public void testUnflushedSeqIdTracking() throws IOException, InterruptedException {
final String name = this.name.getMethodName();
final byte[] b = Bytes.toBytes("b");
@@ -156,10 +158,10 @@ public class TestFSHLog extends AbstractTestFSWAL {
});
// open a new region which uses this WAL
- HTableDescriptor htd =
- new HTableDescriptor(TableName.valueOf(this.name.getMethodName())).addFamily(new HColumnDescriptor(b));
- HRegionInfo hri =
- new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
+ TableDescriptor htd =
+ TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName()))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(b)).build();
+ RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
final HRegion region = TEST_UTIL.createLocalHRegion(hri, htd, log);
ExecutorService exec = Executors.newFixedThreadPool(2);
[02/11] hbase git commit: HBASE-19078 Add a remote peer cluster wal
directory config for synchronous replication
Posted by zh...@apache.org.
HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication
Signed-off-by: zhangduo <zh...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ba5ff559
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ba5ff559
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ba5ff559
Branch: refs/heads/HBASE-19064
Commit: ba5ff559ed7d7aecc6d17314312ba8a1471f5917
Parents: a3e4b35
Author: Guanghao Zhang <zg...@apache.org>
Authored: Sat Jan 13 18:55:28 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Feb 10 17:47:53 2018 +0800
----------------------------------------------------------------------
.../replication/ReplicationPeerConfigUtil.java | 6 ++
.../replication/ReplicationPeerConfig.java | 21 +++++-
.../ReplicationPeerConfigBuilder.java | 7 ++
.../src/main/protobuf/Replication.proto | 1 +
.../replication/ReplicationPeerManager.java | 15 ++++
.../replication/TestReplicationAdmin.java | 77 ++++++++++++++++++++
.../src/main/ruby/hbase/replication_admin.rb | 17 +++--
hbase-shell/src/main/ruby/hbase_constants.rb | 1 +
.../src/main/ruby/shell/commands/add_peer.rb | 21 +++++-
.../src/main/ruby/shell/commands/list_peers.rb | 19 ++++-
.../test/ruby/hbase/replication_admin_test.rb | 16 ++++
11 files changed, 188 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index a234a9b..642149b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -315,6 +315,9 @@ public final class ReplicationPeerConfigUtil {
excludeNamespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
}
+ if (peer.hasRemoteWALDir()) {
+ builder.setRemoteWALDir(peer.getRemoteWALDir());
+ }
return builder.build();
}
@@ -371,6 +374,9 @@ public final class ReplicationPeerConfigUtil {
}
}
+ if (peerConfig.getRemoteWALDir() != null) {
+ builder.setRemoteWALDir(peerConfig.getRemoteWALDir());
+ }
return builder.build();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index bf8d030..4c10c46 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -46,6 +46,8 @@ public class ReplicationPeerConfig {
private Map<TableName, ? extends Collection<String>> excludeTableCFsMap = null;
private Set<String> excludeNamespaces = null;
private long bandwidth = 0;
+ // Used by synchronous replication
+ private String remoteWALDir;
private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) {
this.clusterKey = builder.clusterKey;
@@ -64,6 +66,7 @@ public class ReplicationPeerConfig {
builder.excludeNamespaces != null ? Collections.unmodifiableSet(builder.excludeNamespaces)
: null;
this.bandwidth = builder.bandwidth;
+ this.remoteWALDir = builder.remoteWALDir;
}
private Map<TableName, List<String>>
@@ -210,6 +213,10 @@ public class ReplicationPeerConfig {
return this;
}
+ public String getRemoteWALDir() {
+ return this.remoteWALDir;
+ }
+
public static ReplicationPeerConfigBuilder newBuilder() {
return new ReplicationPeerConfigBuilderImpl();
}
@@ -223,7 +230,8 @@ public class ReplicationPeerConfig {
.setReplicateAllUserTables(peerConfig.replicateAllUserTables())
.setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap())
.setExcludeNamespaces(peerConfig.getExcludeNamespaces())
- .setBandwidth(peerConfig.getBandwidth());
+ .setBandwidth(peerConfig.getBandwidth())
+ .setRemoteWALDir(peerConfig.getRemoteWALDir());
return builder;
}
@@ -250,6 +258,8 @@ public class ReplicationPeerConfig {
private long bandwidth = 0;
+ private String remoteWALDir = null;
+
@Override
public ReplicationPeerConfigBuilder setClusterKey(String clusterKey) {
this.clusterKey = clusterKey;
@@ -313,6 +323,12 @@ public class ReplicationPeerConfig {
}
@Override
+ public ReplicationPeerConfigBuilder setRemoteWALDir(String dir) {
+ this.remoteWALDir = dir;
+ return this;
+ }
+
+ @Override
public ReplicationPeerConfig build() {
// It would be nice to validate the configuration, but we have to work with "old" data
// from ZK which makes it much more difficult.
@@ -341,6 +357,9 @@ public class ReplicationPeerConfig {
}
}
builder.append("bandwidth=").append(bandwidth);
+ if (this.remoteWALDir != null) {
+ builder.append(",remoteWALDir=").append(remoteWALDir);
+ }
return builder.toString();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java
index 0b2f2e2..eac98c6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java
@@ -138,6 +138,13 @@ public interface ReplicationPeerConfigBuilder {
ReplicationPeerConfigBuilder setExcludeNamespaces(Set<String> namespaces);
/**
+ * Set the remote peer cluster's wal directory. Used by synchronous replication.
+ * @param dir the remote peer cluster's wal directory
+ * @return {@code this}
+ */
+ ReplicationPeerConfigBuilder setRemoteWALDir(String dir);
+
+ /**
* Builds the configuration object from the current state of {@code this}.
* @return A {@link ReplicationPeerConfig} instance.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-protocol-shaded/src/main/protobuf/Replication.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
index 9f7b4c2..44295d8 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Replication.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -48,6 +48,7 @@ message ReplicationPeer {
optional bool replicate_all = 8;
repeated TableCF exclude_table_cfs = 9;
repeated bytes exclude_namespaces = 10;
+ optional string remoteWALDir = 11;
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 19fc7f4..d715e2e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -146,6 +146,21 @@ public class ReplicationPeerManager {
oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId +
" does not match new class '" + peerConfig.getReplicationEndpointImpl() + "'");
}
+
+ if (!isStringEquals(peerConfig.getRemoteWALDir(), oldPeerConfig.getRemoteWALDir())) {
+ throw new DoNotRetryIOException(
+ "Changing the remote wal dir on an existing peer is not allowed. Existing remote wal " +
+ "dir '" + oldPeerConfig.getRemoteWALDir() + "' for peer " + peerId +
+ " does not match new remote wal dir '" + peerConfig.getRemoteWALDir() + "'");
+ }
+
+ if (oldPeerConfig.getRemoteWALDir() != null) {
+ if (!ReplicationUtils.isKeyConfigEqual(oldPeerConfig, peerConfig)) {
+ throw new DoNotRetryIOException(
+ "Changing the replicated namespace/table config on a synchronous replication "
+ + "peer(peerId: " + peerId + ") is not allowed.");
+ }
+ }
}
public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 685c560..e471100 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -906,4 +906,81 @@ public class TestReplicationAdmin {
// OK
}
}
+
+ @Test
+ public void testPeerRemoteWALDir() throws Exception {
+ String rootDir = "hdfs://srv1:9999/hbase";
+ ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
+ builder.setClusterKey(KEY_ONE);
+ hbaseAdmin.addReplicationPeer(ID_ONE, builder.build());
+
+ ReplicationPeerConfig rpc = hbaseAdmin.getReplicationPeerConfig(ID_ONE);
+ assertNull(rpc.getRemoteWALDir());
+
+ try {
+ builder.setRemoteWALDir("hdfs://srv2:8888/hbase");
+ hbaseAdmin.updateReplicationPeerConfig(ID_ONE, builder.build());
+ fail("Change remote wal dir is not allowed");
+ } catch (Exception e) {
+ // OK
+ }
+
+ builder = ReplicationPeerConfig.newBuilder();
+ builder.setClusterKey(KEY_SECOND);
+ builder.setRemoteWALDir(rootDir);
+ hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
+
+ rpc = hbaseAdmin.getReplicationPeerConfig(ID_SECOND);
+ assertEquals(rootDir, rpc.getRemoteWALDir());
+
+ try {
+ builder.setRemoteWALDir("hdfs://srv2:8888/hbase");
+ hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
+ fail("Change remote wal dir is not allowed");
+ } catch (Exception e) {
+ // OK
+ }
+
+ try {
+ builder.setRemoteWALDir(null);
+ hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
+ fail("Change remote wal dir is not allowed");
+ } catch (Exception e) {
+ // OK
+ }
+
+ try {
+ builder = ReplicationPeerConfig.newBuilder(rpc);
+ builder.setReplicateAllUserTables(false);
+ hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
+ fail(
+ "Change replicated namespace/table config on an existing synchronous peer is not allowed");
+ } catch (Exception e) {
+ // OK
+ }
+
+ try {
+ builder = ReplicationPeerConfig.newBuilder(rpc);
+ Set<String> namespaces = new HashSet<>();
+ namespaces.add("ns1");
+ builder.setExcludeNamespaces(namespaces);
+ hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
+ fail(
+ "Change replicated namespace/table config on an existing synchronous peer is not allowed");
+ } catch (Exception e) {
+ // OK
+ }
+
+ try {
+ builder = ReplicationPeerConfig.newBuilder(rpc);
+ Map<TableName, List<String>> tableCfs = new HashMap<>();
+ tableCfs.put(TableName.valueOf(name.getMethodName()), new ArrayList<>());
+ builder.setExcludeTableCFsMap(tableCfs);
+ hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
+ fail(
+ "Change replicated namespace/table config on an existing synchronous peer is not allowed");
+ } catch (Exception e) {
+ // OK
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-shell/src/main/ruby/hbase/replication_admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index b9d4a0c..ba7d191 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -64,16 +64,20 @@ module Hbase
table_cfs = args.fetch(TABLE_CFS, nil)
namespaces = args.fetch(NAMESPACES, nil)
peer_state = args.fetch(STATE, nil)
+ remote_wal_dir = args.fetch(REMOTE_WAL_DIR, nil)
# Create and populate a ReplicationPeerConfig
- builder = org.apache.hadoop.hbase.replication.ReplicationPeerConfig
- .newBuilder()
+ builder = ReplicationPeerConfig.newBuilder()
builder.set_cluster_key(cluster_key)
unless endpoint_classname.nil?
builder.set_replication_endpoint_impl(endpoint_classname)
end
+ unless remote_wal_dir.nil?
+ builder.setRemoteWALDir(remote_wal_dir)
+ end
+
unless config.nil?
builder.putAllConfiguration(config)
end
@@ -228,8 +232,7 @@ module Hbase
namespaces.each do |n|
ns_set.add(n)
end
- builder = org.apache.hadoop.hbase.replication.ReplicationPeerConfig
- .newBuilder(rpc)
+ builder = ReplicationPeerConfig.newBuilder(rpc)
builder.setNamespaces(ns_set)
@admin.updateReplicationPeerConfig(id, builder.build)
end
@@ -248,8 +251,7 @@ module Hbase
ns_set.remove(n)
end
end
- builder = org.apache.hadoop.hbase.replication.ReplicationPeerConfig
- .newBuilder(rpc)
+ builder = ReplicationPeerConfig.newBuilder(rpc)
builder.setNamespaces(ns_set)
@admin.updateReplicationPeerConfig(id, builder.build)
end
@@ -361,8 +363,7 @@ module Hbase
# Create and populate a ReplicationPeerConfig
replication_peer_config = get_peer_config(id)
- builder = org.apache.hadoop.hbase.replication.ReplicationPeerConfig
- .newBuilder(replication_peer_config)
+ builder = ReplicationPeerConfig.newBuilder(replication_peer_config)
unless config.nil?
builder.putAllConfiguration(config)
end
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-shell/src/main/ruby/hbase_constants.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb
index 28484cb..2870dfb 100644
--- a/hbase-shell/src/main/ruby/hbase_constants.rb
+++ b/hbase-shell/src/main/ruby/hbase_constants.rb
@@ -77,6 +77,7 @@ module HBaseConstants
VALUE = 'VALUE'.freeze
ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'.freeze
CLUSTER_KEY = 'CLUSTER_KEY'.freeze
+ REMOTE_WAL_DIR = 'REMOTE_WAL_DIR'.freeze
TABLE_CFS = 'TABLE_CFS'.freeze
NAMESPACES = 'NAMESPACES'.freeze
STATE = 'STATE'.freeze
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
index eb2da83..4b6f294 100644
--- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
@@ -35,7 +35,7 @@ to the peer cluster.
An optional parameter for table column families identifies which tables and/or column families
will be replicated to the peer cluster.
-Notice: Set a namespace in the peer config means that all tables in this namespace
+Note: Set a namespace in the peer config means that all tables in this namespace
will be replicated to the peer cluster. So if you already have set a namespace in peer config,
then you can't set this namespace's tables in the peer config again.
@@ -74,6 +74,25 @@ the key TABLE_CFS.
Note: Either CLUSTER_KEY or ENDPOINT_CLASSNAME must be specified. If ENDPOINT_CLASSNAME is specified, CLUSTER_KEY is
optional and should only be specified if a particular custom endpoint requires it.
+The default replication peer is asynchronous. You can also add a synchronous replication peer
+by specifying the REMOTE_WAL_DIR parameter. A synchronous replication peer also supports the
+other optional configs available to an asynchronous replication peer.
+
+Examples:
+
+ hbase> add_peer '1', CLUSTER_KEY => "server1.cie.com:2181:/hbase",
+ REMOTE_WAL_DIR => "hdfs://srv1:9999/hbase"
+ hbase> add_peer '1', CLUSTER_KEY => "server1.cie.com:2181:/hbase",
+ STATE => "ENABLED", REMOTE_WAL_DIR => "hdfs://srv1:9999/hbase"
+ hbase> add_peer '1', CLUSTER_KEY => "server1.cie.com:2181:/hbase",
+ STATE => "DISABLED", REMOTE_WAL_DIR => "hdfs://srv1:9999/hbase"
+ hbase> add_peer '1', CLUSTER_KEY => "server1.cie.com:2181:/hbase",
+ REMOTE_WAL_DIR => "hdfs://srv1:9999/hbase", NAMESPACES => ["ns1", "ns2"]
+ hbase> add_peer '1', CLUSTER_KEY => "server1.cie.com:2181:/hbase",
+ REMOTE_WAL_DIR => "hdfs://srv1:9999/hbase", TABLE_CFS => { "table1" => [] }
+
+Note: The REMOTE_WAL_DIR cannot be changed once the peer has been created.
+
EOF
end
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index 522d23d..caeab86 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -39,7 +39,8 @@ EOF
peers = replication_admin.list_peers
formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
- STATE REPLICATE_ALL NAMESPACES TABLE_CFS BANDWIDTH])
+ REMOTE_ROOT_DIR STATE REPLICATE_ALL
+ NAMESPACES TABLE_CFS BANDWIDTH])
peers.each do |peer|
id = peer.getPeerId
@@ -52,8 +53,20 @@ EOF
namespaces = replication_admin.show_peer_namespaces(config)
tableCFs = replication_admin.show_peer_tableCFs_by_config(config)
end
- formatter.row([id, config.getClusterKey,
- config.getReplicationEndpointImpl, state,
+ cluster_key = 'nil'
+ unless config.getClusterKey.nil?
+ cluster_key = config.getClusterKey
+ end
+ endpoint_classname = 'nil'
+ unless config.getReplicationEndpointImpl.nil?
+ endpoint_classname = config.getReplicationEndpointImpl
+ end
+ remote_root_dir = 'nil'
+ unless config.getRemoteWALDir.nil?
+ remote_root_dir = config.getRemoteWALDir
+ end
+ formatter.row([id, cluster_key, endpoint_classname,
+ remote_root_dir, state,
config.replicateAllUserTables, namespaces, tableCFs,
config.getBandwidth])
end
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5ff559/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index 0f84396..7f2b6ae 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -97,6 +97,22 @@ module Hbase
command(:remove_peer, @peer_id)
end
+ define_test "add_peer: remote wal dir" do
+ cluster_key = "server1.cie.com:2181:/hbase"
+ remote_wal_dir = "hdfs://srv1:9999/hbase"
+ args = { CLUSTER_KEY => cluster_key, REMOTE_WAL_DIR => remote_wal_dir }
+ command(:add_peer, @peer_id, args)
+
+ assert_equal(1, command(:list_peers).length)
+ peer = command(:list_peers).get(0)
+ assert_equal(@peer_id, peer.getPeerId)
+ assert_equal(cluster_key, peer.getPeerConfig.getClusterKey)
+ assert_equal(remote_wal_dir, peer.getPeerConfig.getRemoteWALDir)
+
+ # cleanup for future tests
+ command(:remove_peer, @peer_id)
+ end
+
define_test "add_peer: single zk cluster key with enabled/disabled state" do
cluster_key = "server1.cie.com:2181:/hbase"
[06/11] hbase git commit: HBASE-19747 Introduce a special WALProvider
for synchronous replication
Posted by zh...@apache.org.
HBASE-19747 Introduce a special WALProvider for synchronous replication
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1851e445
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1851e445
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1851e445
Branch: refs/heads/HBASE-19064
Commit: 1851e445feca4dee4e3ba9d7e2f957b97473fb2a
Parents: ba5ff55
Author: zhangduo <zh...@apache.org>
Authored: Fri Jan 19 18:38:39 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Feb 10 17:49:13 2018 +0800
----------------------------------------------------------------------
.../hbase/regionserver/wal/AbstractFSWAL.java | 7 +
.../hbase/regionserver/wal/AsyncFSWAL.java | 1 -
.../hbase/regionserver/wal/DualAsyncFSWAL.java | 4 +-
.../hadoop/hbase/regionserver/wal/FSHLog.java | 3 -
.../regionserver/PeerActionListener.java | 33 +++
.../SynchronousReplicationPeerProvider.java | 35 +++
.../hadoop/hbase/wal/AbstractFSWALProvider.java | 1 +
.../hadoop/hbase/wal/AsyncFSWALProvider.java | 18 +-
.../hbase/wal/NettyAsyncFSWALConfigHelper.java | 8 +-
.../hbase/wal/RegionGroupingProvider.java | 13 +-
.../wal/SynchronousReplicationWALProvider.java | 225 +++++++++++++++++++
.../org/apache/hadoop/hbase/wal/WALFactory.java | 37 ++-
.../org/apache/hadoop/hbase/wal/WALKeyImpl.java | 16 +-
.../regionserver/TestCompactionPolicy.java | 1 +
.../regionserver/TestFailedAppendAndSync.java | 122 +++++-----
.../hadoop/hbase/regionserver/TestHRegion.java | 24 +-
.../TestHRegionWithInMemoryFlush.java | 7 -
.../hbase/regionserver/TestRegionIncrement.java | 20 +-
.../hbase/regionserver/TestWALLockup.java | 1 +
.../regionserver/wal/AbstractTestWALReplay.java | 1 +
.../regionserver/wal/ProtobufLogTestHelper.java | 44 +++-
.../hbase/regionserver/wal/TestAsyncFSWAL.java | 13 +-
.../regionserver/wal/TestAsyncWALReplay.java | 4 +-
.../wal/TestCombinedAsyncWriter.java | 3 +-
.../hbase/regionserver/wal/TestFSHLog.java | 15 +-
.../hbase/regionserver/wal/TestWALReplay.java | 1 +
.../apache/hadoop/hbase/wal/IOTestProvider.java | 2 -
.../TestSynchronousReplicationWALProvider.java | 153 +++++++++++++
28 files changed, 659 insertions(+), 153 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 14fbe10..31b1c54 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -430,6 +430,13 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
this.implClassName = getClass().getSimpleName();
}
+ /**
+ * Used to initialize the WAL. Usually just call rollWriter to create the first log writer.
+ */
+ public void init() throws IOException {
+ rollWriter();
+ }
+
@Override
public void registerWALActionsListener(WALActionsListener listener) {
this.listeners.add(listener);
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 8e57441..ac72dc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -247,7 +247,6 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE);
waitOnShutdownInSeconds = conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS,
DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS);
- rollWriter();
}
private static boolean waitingRoll(int epochAndState) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 6bf9e02..f92ce93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
@@ -38,14 +38,14 @@ public class DualAsyncFSWAL extends AsyncFSWAL {
private final Path remoteWalDir;
- public DualAsyncFSWAL(FileSystem fs, FileSystem remoteFs, Path rootDir, Path remoteRootDir,
+ public DualAsyncFSWAL(FileSystem fs, FileSystem remoteFs, Path rootDir, Path remoteWalDir,
String logDir, String archiveDir, Configuration conf, List<WALActionsListener> listeners,
boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup,
Class<? extends Channel> channelClass) throws FailedLogCloseException, IOException {
super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix,
eventLoopGroup, channelClass);
this.remoteFs = remoteFs;
- this.remoteWalDir = new Path(remoteRootDir, logDir);
+ this.remoteWalDir = remoteWalDir;
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 2023932..b11a052 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -212,9 +212,6 @@ public class FSHLog extends AbstractFSWAL<Writer> {
5);
this.closeErrorsTolerated = conf.getInt("hbase.regionserver.logroll.errors.tolerated", 2);
- // rollWriter sets this.hdfs_out if it can.
- rollWriter();
-
// This is the 'writer' -- a single threaded executor. This single thread 'consumes' what is
// put on the ring buffer.
String hostingThreadName = Thread.currentThread().getName();
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
new file mode 100644
index 0000000..74ad626
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Get notification for replication peer events. Mainly used for telling the
+ * {@link org.apache.hadoop.hbase.wal.SynchronousReplicationWALProvider} to close some WAL if not
+ * used any more.
+ * <p>
+ * TODO: Also need a synchronous peer state change notification.
+ */
+@InterfaceAudience.Private
+public interface PeerActionListener {
+
+ default void peerRemoved(String peerId) {}
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SynchronousReplicationPeerProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SynchronousReplicationPeerProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SynchronousReplicationPeerProvider.java
new file mode 100644
index 0000000..b4e04fb
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SynchronousReplicationPeerProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Get the peer id and remote WAL directory if the region is synchronously replicated.
+ */
+@InterfaceAudience.Private
+public interface SynchronousReplicationPeerProvider {
+
+ /**
+ * Return the peer id and remote WAL directory if the region is synchronously replicated.
+ */
+ Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info);
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
index 231afd5..3eb8f8f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
@@ -137,6 +137,7 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
if (walCopy == null) {
walCopy = createWAL();
wal = walCopy;
+ walCopy.init();
}
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java
index 9c62bed..84e859d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.wal;
import java.io.IOException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -31,12 +30,10 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
-import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
-import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
-import org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultThreadFactory;
/**
* A WAL provider that use {@link AsyncFSWAL}.
@@ -61,6 +58,7 @@ public class AsyncFSWALProvider extends AbstractFSWALProvider<AsyncFSWAL> {
private EventLoopGroup eventLoopGroup;
private Class<? extends Channel> channelClass;
+
@Override
protected AsyncFSWAL createWAL() throws IOException {
return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf),
@@ -73,15 +71,9 @@ public class AsyncFSWALProvider extends AbstractFSWALProvider<AsyncFSWAL> {
@Override
protected void doInit(Configuration conf) throws IOException {
Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass =
- NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
- if (eventLoopGroupAndChannelClass != null) {
- eventLoopGroup = eventLoopGroupAndChannelClass.getFirst();
- channelClass = eventLoopGroupAndChannelClass.getSecond();
- } else {
- eventLoopGroup = new NioEventLoopGroup(1,
- new DefaultThreadFactory("AsyncFSWAL", true, Thread.MAX_PRIORITY));
- channelClass = NioSocketChannel.class;
- }
+ NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
+ eventLoopGroup = eventLoopGroupAndChannelClass.getFirst();
+ channelClass = eventLoopGroupAndChannelClass.getSecond();
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java
index 0836b5d..00ccb71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java
@@ -26,6 +26,9 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
+import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
+import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
+import org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultThreadFactory;
/**
* Helper class for passing netty event loop config to {@link AsyncFSWALProvider}.
@@ -55,7 +58,10 @@ public class NettyAsyncFSWALConfigHelper {
static Pair<EventLoopGroup, Class<? extends Channel>> getEventLoopConfig(Configuration conf) {
String name = conf.get(EVENT_LOOP_CONFIG);
if (StringUtils.isBlank(name)) {
- return null;
+ // create new event loop group if config is empty
+ return Pair.<EventLoopGroup, Class<? extends Channel>> newPair(
+ new NioEventLoopGroup(0, new DefaultThreadFactory("AsyncFSWAL", true, Thread.MAX_PRIORITY)),
+ NioSocketChannel.class);
}
return EVENT_LOOP_CONFIG_MAP.get(name);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
index 28817e9..0b7b8da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
@@ -31,6 +31,7 @@ import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
// imports for classes still in regionserver.wal
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.util.Bytes;
@@ -132,6 +133,7 @@ public class RegionGroupingProvider implements WALProvider {
private RegionGroupingStrategy strategy;
private WALFactory factory;
+ private Configuration conf;
private List<WALActionsListener> listeners = new ArrayList<>();
private String providerId;
private Class<? extends WALProvider> providerClass;
@@ -141,6 +143,7 @@ public class RegionGroupingProvider implements WALProvider {
if (null != strategy) {
throw new IllegalStateException("WALProvider.init should only be called once.");
}
+ this.conf = conf;
this.factory = factory;
StringBuilder sb = new StringBuilder().append(factory.factoryId);
if (providerId != null) {
@@ -156,11 +159,11 @@ public class RegionGroupingProvider implements WALProvider {
}
private WALProvider createProvider(String group) throws IOException {
- if (META_WAL_PROVIDER_ID.equals(providerId)) {
- return factory.createProvider(providerClass, META_WAL_PROVIDER_ID);
- } else {
- return factory.createProvider(providerClass, group);
- }
+ WALProvider provider = WALFactory.createProvider(providerClass);
+ provider.init(factory, conf,
+ META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group);
+ provider.addWALActionsListener(new MetricsWAL());
+ return provider;
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SynchronousReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SynchronousReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SynchronousReplicationWALProvider.java
new file mode 100644
index 0000000..f60599f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SynchronousReplicationWALProvider.java
@@ -0,0 +1,225 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.wal;
+
+import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.WAL_FILE_NAME_DELIMITER;
+import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.getWALArchiveDirectoryName;
+import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.getWALDirectoryName;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Lock;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL;
+import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.replication.regionserver.PeerActionListener;
+import org.apache.hadoop.hbase.replication.regionserver.SynchronousReplicationPeerProvider;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.KeyLocker;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Streams;
+import org.apache.hbase.thirdparty.io.netty.channel.Channel;
+import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
+
+/**
+ * The special {@link WALProvider} for synchronous replication.
+ * <p>
+ * It works like an interceptor: when a WAL is requested, it first checks whether the given region
+ * should be replicated synchronously; if so, it returns a special WAL for it, otherwise it
+ * delegates the request to the normal {@link WALProvider}.
+ */
+@InterfaceAudience.Private
+public class SynchronousReplicationWALProvider implements WALProvider, PeerActionListener {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SynchronousReplicationWALProvider.class);
+
+ private static final String LOG_SUFFIX = ".syncrep";
+
+ private final WALProvider provider;
+
+ private final SynchronousReplicationPeerProvider peerProvider;
+
+ private WALFactory factory;
+
+ private Configuration conf;
+
+ private List<WALActionsListener> listeners = new ArrayList<>();
+
+ private EventLoopGroup eventLoopGroup;
+
+ private Class<? extends Channel> channelClass;
+
+ private AtomicBoolean initialized = new AtomicBoolean(false);
+
+ private final ConcurrentMap<String, DualAsyncFSWAL> peerId2WAL = new ConcurrentHashMap<>();
+
+ private final KeyLocker<String> createLock = new KeyLocker<>();
+
+ SynchronousReplicationWALProvider(WALProvider provider,
+ SynchronousReplicationPeerProvider peerProvider) {
+ this.provider = provider;
+ this.peerProvider = peerProvider;
+ }
+
+ @Override
+ public void init(WALFactory factory, Configuration conf, String providerId) throws IOException {
+ if (!initialized.compareAndSet(false, true)) {
+ throw new IllegalStateException("WALProvider.init should only be called once.");
+ }
+ provider.init(factory, conf, providerId);
+ this.conf = conf;
+ this.factory = factory;
+ Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass =
+ NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
+ eventLoopGroup = eventLoopGroupAndChannelClass.getFirst();
+ channelClass = eventLoopGroupAndChannelClass.getSecond();
+ }
+
+ private String getLogPrefix(String peerId) {
+ return factory.factoryId + WAL_FILE_NAME_DELIMITER + peerId;
+ }
+
+ private DualAsyncFSWAL createWAL(String peerId, String remoteWALDir) throws IOException {
+ Path remoteWALDirPath = new Path(remoteWALDir);
+ FileSystem remoteFs = remoteWALDirPath.getFileSystem(conf);
+ return new DualAsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), remoteFs,
+ CommonFSUtils.getWALRootDir(conf), new Path(remoteWALDirPath, peerId),
+ getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId),
+ conf, listeners, true, getLogPrefix(peerId), LOG_SUFFIX, eventLoopGroup, channelClass);
+ }
+
+ private DualAsyncFSWAL getWAL(String peerId, String remoteWALDir) throws IOException {
+ DualAsyncFSWAL wal = peerId2WAL.get(peerId);
+ if (wal != null) {
+ return wal;
+ }
+ Lock lock = createLock.acquireLock(peerId);
+ try {
+ wal = peerId2WAL.get(peerId);
+ if (wal == null) {
+ wal = createWAL(peerId, remoteWALDir);
+ peerId2WAL.put(peerId, wal);
+ wal.init();
+ }
+ return wal;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public WAL getWAL(RegionInfo region) throws IOException {
+ Optional<Pair<String, String>> peerIdAndRemoteWALDir =
+ peerProvider.getPeerIdAndRemoteWALDir(region);
+ if (peerIdAndRemoteWALDir.isPresent()) {
+ Pair<String, String> pair = peerIdAndRemoteWALDir.get();
+ return getWAL(pair.getFirst(), pair.getSecond());
+ } else {
+ return provider.getWAL(region);
+ }
+ }
+
+ private Stream<WAL> getWALStream() {
+ return Streams.concat(peerId2WAL.values().stream(), provider.getWALs().stream());
+ }
+
+ @Override
+ public List<WAL> getWALs() {
+ return getWALStream().collect(Collectors.toList());
+ }
+
+ @Override
+ public void shutdown() throws IOException {
+ // save the last exception and rethrow
+ IOException failure = null;
+ for (DualAsyncFSWAL wal : peerId2WAL.values()) {
+ try {
+ wal.shutdown();
+ } catch (IOException e) {
+ LOG.error("Shutdown WAL failed", e);
+ failure = e;
+ }
+ }
+ provider.shutdown();
+ if (failure != null) {
+ throw failure;
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ // save the last exception and rethrow
+ IOException failure = null;
+ for (DualAsyncFSWAL wal : peerId2WAL.values()) {
+ try {
+ wal.close();
+ } catch (IOException e) {
+ LOG.error("Close WAL failed", e);
+ failure = e;
+ }
+ }
+ provider.close();
+ if (failure != null) {
+ throw failure;
+ }
+ }
+
+ @Override
+ public long getNumLogFiles() {
+ return peerId2WAL.size() + provider.getNumLogFiles();
+ }
+
+ @Override
+ public long getLogFileSize() {
+ return peerId2WAL.values().stream().mapToLong(DualAsyncFSWAL::getLogFileSize).sum() +
+ provider.getLogFileSize();
+ }
+
+ @Override
+ public void peerRemoved(String peerId) {
+ WAL wal = peerId2WAL.remove(peerId);
+ if (wal != null) {
+ try {
+ wal.close();
+ } catch (IOException e) {
+ LOG.error("Close WAL failed", e);
+ }
+ }
+ }
+
+ @Override
+ public void addWALActionsListener(WALActionsListener listener) {
+ listeners.add(listener);
+ provider.addWALActionsListener(listener);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 1410b53..4e519ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
+import org.apache.hadoop.hbase.replication.regionserver.SynchronousReplicationPeerProvider;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
@@ -130,13 +131,10 @@ public class WALFactory {
}
}
- WALProvider createProvider(Class<? extends WALProvider> clazz, String providerId)
- throws IOException {
- LOG.info("Instantiating WALProvider of type " + clazz);
+ static WALProvider createProvider(Class<? extends WALProvider> clazz) throws IOException {
+ LOG.info("Instantiating WALProvider of type {}", clazz);
try {
- final WALProvider result = clazz.getDeclaredConstructor().newInstance();
- result.init(this, conf, providerId);
- return result;
+ return clazz.newInstance();
} catch (Exception e) {
LOG.error("couldn't set up WALProvider, the configured class is " + clazz);
LOG.debug("Exception details for failure to load WALProvider.", e);
@@ -148,9 +146,10 @@ public class WALFactory {
* instantiate a provider from a config property. requires conf to have already been set (as well
* as anything the provider might need to read).
*/
- WALProvider getProvider(String key, String defaultValue, String providerId) throws IOException {
- Class<? extends WALProvider> clazz = getProviderClass(key, defaultValue);
- WALProvider provider = createProvider(clazz, providerId);
+ private WALProvider getProvider(String key, String defaultValue, String providerId)
+ throws IOException {
+ WALProvider provider = createProvider(getProviderClass(key, defaultValue));
+ provider.init(this, conf, providerId);
provider.addWALActionsListener(new MetricsWAL());
return provider;
}
@@ -182,6 +181,26 @@ public class WALFactory {
}
/**
+ * A temporary constructor for testing synchronous replication.
+ * <p>
+ * Remove it once we can integrate the synchronous replication logic in RS.
+ */
+ @VisibleForTesting
+ WALFactory(Configuration conf, String factoryId, SynchronousReplicationPeerProvider peerProvider)
+ throws IOException {
+ timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000);
+ /* TODO Both of these are probably specific to the fs wal provider */
+ logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
+ AbstractFSWALProvider.Reader.class);
+ this.conf = conf;
+ this.factoryId = factoryId;
+ WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER));
+ this.provider = new SynchronousReplicationWALProvider(provider, peerProvider);
+ this.provider.addWALActionsListener(new MetricsWAL());
+ this.provider.init(this, conf, null);
+ }
+
+ /**
* Shutdown all WALs and clean up any underlying storage.
* Use only when you will not need to replay and edits that have gone to any wals from this
* factory.
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
index c1a77ee..4d1ed91 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
@@ -130,13 +130,21 @@ public class WALKeyImpl implements WALKey {
}
@VisibleForTesting
- public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename,
- long logSeqNum,
+ public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
final long now, UUID clusterId) {
List<UUID> clusterIds = new ArrayList<>(1);
clusterIds.add(clusterId);
- init(encodedRegionName, tablename, logSeqNum, now, clusterIds,
- HConstants.NO_NONCE, HConstants.NO_NONCE, null, null);
+ init(encodedRegionName, tablename, logSeqNum, now, clusterIds, HConstants.NO_NONCE,
+ HConstants.NO_NONCE, null, null);
+ }
+
+ @VisibleForTesting
+ public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
+ final long now, UUID clusterId, MultiVersionConcurrencyControl mvcc) {
+ List<UUID> clusterIds = new ArrayList<>(1);
+ clusterIds.add(clusterId);
+ init(encodedRegionName, tablename, logSeqNum, now, clusterIds, HConstants.NO_NONCE,
+ HConstants.NO_NONCE, mvcc, null);
}
// TODO: Fix being able to pass in sequenceid.
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
index ca4b227..939f35c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
@@ -100,6 +100,7 @@ public class TestCompactionPolicy {
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
hlog = new FSHLog(fs, basedir, logName, conf);
+ hlog.init();
ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
region = HRegion.createHRegion(info, basedir, conf, htd, hlog);
region.close();
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
index e27a986..f9eb534 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
@@ -102,65 +102,64 @@ public class TestFailedAppendAndSync {
return name.getMethodName();
}
- /**
- * Reproduce locking up that happens when we get an exceptions appending and syncing.
- * See HBASE-14317.
- * First I need to set up some mocks for Server and RegionServerServices. I also need to
- * set up a dodgy WAL that will throw an exception when we go to append to it.
- */
- @Test (timeout=300000)
- public void testLockupAroundBadAssignSync() throws IOException {
+ // Dodgy WAL. Will throw exceptions when flags set.
+ class DodgyFSLog extends FSHLog {
+ volatile boolean throwSyncException = false;
+ volatile boolean throwAppendException = false;
final AtomicLong rolls = new AtomicLong(0);
- // Dodgy WAL. Will throw exceptions when flags set.
- class DodgyFSLog extends FSHLog {
- volatile boolean throwSyncException = false;
- volatile boolean throwAppendException = false;
- public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf)
- throws IOException {
- super(fs, root, logDir, conf);
- }
-
- @Override
- public byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException {
- byte [][] regions = super.rollWriter(force);
- rolls.getAndIncrement();
- return regions;
- }
+ public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf)
+ throws IOException {
+ super(fs, root, logDir, conf);
+ }
- @Override
- protected Writer createWriterInstance(Path path) throws IOException {
- final Writer w = super.createWriterInstance(path);
- return new Writer() {
- @Override
- public void close() throws IOException {
- w.close();
- }
+ @Override
+ public byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException {
+ byte[][] regions = super.rollWriter(force);
+ rolls.getAndIncrement();
+ return regions;
+ }
- @Override
- public void sync() throws IOException {
- if (throwSyncException) {
- throw new IOException("FAKE! Failed to replace a bad datanode...");
- }
- w.sync();
- }
+ @Override
+ protected Writer createWriterInstance(Path path) throws IOException {
+ final Writer w = super.createWriterInstance(path);
+ return new Writer() {
+ @Override
+ public void close() throws IOException {
+ w.close();
+ }
- @Override
- public void append(Entry entry) throws IOException {
- if (throwAppendException) {
- throw new IOException("FAKE! Failed to replace a bad datanode...");
- }
- w.append(entry);
- }
+ @Override
+ public void sync() throws IOException {
+ if (throwSyncException) {
+ throw new IOException("FAKE! Failed to replace a bad datanode...");
+ }
+ w.sync();
+ }
- @Override
- public long getLength() {
- return w.getLength();
- }
- };
+ @Override
+ public void append(Entry entry) throws IOException {
+ if (throwAppendException) {
+ throw new IOException("FAKE! Failed to replace a bad datanode...");
}
- }
+ w.append(entry);
+ }
+ @Override
+ public long getLength() {
+ return w.getLength();
+ }
+ };
+ }
+ }
+ /**
+ * Reproduce locking up that happens when we get an exceptions appending and syncing.
+ * See HBASE-14317.
+ * First I need to set up some mocks for Server and RegionServerServices. I also need to
+ * set up a dodgy WAL that will throw an exception when we go to append to it.
+ */
+ @Test (timeout=300000)
+ public void testLockupAroundBadAssignSync() throws IOException {
// Make up mocked server and services.
Server server = mock(Server.class);
when(server.getConfiguration()).thenReturn(CONF);
@@ -172,6 +171,7 @@ public class TestFailedAppendAndSync {
FileSystem fs = FileSystem.get(CONF);
Path rootDir = new Path(dir + getName());
DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF);
+ dodgyWAL.init();
LogRoller logRoller = new LogRoller(server, services);
logRoller.addWAL(dodgyWAL);
logRoller.start();
@@ -192,7 +192,7 @@ public class TestFailedAppendAndSync {
} catch (IOException ioe) {
fail();
}
- long rollsCount = rolls.get();
+ long rollsCount = dodgyWAL.rolls.get();
try {
dodgyWAL.throwAppendException = true;
dodgyWAL.throwSyncException = false;
@@ -202,8 +202,10 @@ public class TestFailedAppendAndSync {
} catch (IOException ioe) {
threwOnAppend = true;
}
- while (rollsCount == rolls.get()) Threads.sleep(100);
- rollsCount = rolls.get();
+ while (rollsCount == dodgyWAL.rolls.get()) {
+ Threads.sleep(100);
+ }
+ rollsCount = dodgyWAL.rolls.get();
// When we get to here.. we should be ok. A new WAL has been put in place. There were no
// appends to sync. We should be able to continue.
@@ -217,14 +219,16 @@ public class TestFailedAppendAndSync {
} catch (IOException ioe) {
threwOnBoth = true;
}
- while (rollsCount == rolls.get()) Threads.sleep(100);
+ while (rollsCount == dodgyWAL.rolls.get()) {
+ Threads.sleep(100);
+ }
// Again, all should be good. New WAL and no outstanding unsync'd edits so we should be able
// to just continue.
// So, should be no abort at this stage. Verify.
- Mockito.verify(server, Mockito.atLeast(0)).
- abort(Mockito.anyString(), (Throwable)Mockito.anyObject());
+ Mockito.verify(server, Mockito.atLeast(0)).abort(Mockito.anyString(),
+ Mockito.any(Throwable.class));
try {
dodgyWAL.throwAppendException = false;
dodgyWAL.throwSyncException = true;
@@ -239,8 +243,8 @@ public class TestFailedAppendAndSync {
// happens. If it don't we'll timeout the whole test. That is fine.
while (true) {
try {
- Mockito.verify(server, Mockito.atLeast(1)).
- abort(Mockito.anyString(), (Throwable)Mockito.anyObject());
+ Mockito.verify(server, Mockito.atLeast(1)).abort(Mockito.anyString(),
+ Mockito.any(Throwable.class));
break;
} catch (WantedButNotInvoked t) {
Threads.sleep(1);
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 88e1aa2..df24e0a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -217,7 +217,6 @@ public class TestHRegion {
protected static HBaseTestingUtility TEST_UTIL;
public static Configuration CONF ;
private String dir;
- private static FileSystem FILESYSTEM;
private final int MAX_VERSIONS = 2;
// Test names
@@ -239,7 +238,6 @@ public class TestHRegion {
@Before
public void setup() throws IOException {
TEST_UTIL = HBaseTestingUtility.createLocalHTU();
- FILESYSTEM = TEST_UTIL.getTestFileSystem();
CONF = TEST_UTIL.getConfiguration();
dir = TEST_UTIL.getDataTestDir("TestHRegion").toString();
method = name.getMethodName();
@@ -341,6 +339,7 @@ public class TestHRegion {
FileSystem fs = FileSystem.get(CONF);
Path rootDir = new Path(dir + "testMemstoreSnapshotSize");
MyFaultyFSLog faultyLog = new MyFaultyFSLog(fs, rootDir, "testMemstoreSnapshotSize", CONF);
+ faultyLog.init();
HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, faultyLog,
COLUMN_FAMILY_BYTES);
@@ -352,7 +351,6 @@ public class TestHRegion {
Put put = new Put(value);
put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
faultyLog.setFailureType(FaultyFSLog.FailureType.SYNC);
-
boolean threwIOE = false;
try {
region.put(put);
@@ -388,6 +386,7 @@ public class TestHRegion {
FileSystem fs = FileSystem.get(CONF);
Path rootDir = new Path(dir + testName);
FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF);
+ hLog.init();
HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
COLUMN_FAMILY_BYTES);
HStore store = region.getStore(COLUMN_FAMILY_BYTES);
@@ -1162,6 +1161,7 @@ public class TestHRegion {
FailAppendFlushMarkerWAL wal =
new FailAppendFlushMarkerWAL(FileSystem.get(walConf), FSUtils.getRootDir(walConf),
method, walConf);
+ wal.init();
this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family);
try {
@@ -1193,7 +1193,7 @@ public class TestHRegion {
wal.flushActions = new FlushAction [] {FlushAction.COMMIT_FLUSH};
wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), FSUtils.getRootDir(walConf),
method, walConf);
-
+ wal.init();
this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family);
region.put(put);
@@ -2445,6 +2445,7 @@ public class TestHRegion {
FileSystem fs = FileSystem.get(CONF);
Path rootDir = new Path(dir + "testDataInMemoryWithoutWAL");
FSHLog hLog = new FSHLog(fs, rootDir, "testDataInMemoryWithoutWAL", CONF);
+ hLog.init();
// This chunk creation is done throughout the code base. Do we want to move it into core?
// It is missing from this test. W/o it we NPE.
ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
@@ -2497,9 +2498,9 @@ public class TestHRegion {
RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
// Because the preBatchMutate returns void, we can't do usual Mockito when...then form. Must
// do below format (from Mockito doc).
- Mockito.doAnswer(new Answer() {
+ Mockito.doAnswer(new Answer<Void>() {
@Override
- public Object answer(InvocationOnMock invocation) throws Throwable {
+ public Void answer(InvocationOnMock invocation) throws Throwable {
MiniBatchOperationInProgress<Mutation> mb = invocation.getArgument(0);
mb.addOperationsFromCP(0, new Mutation[]{addPut});
return null;
@@ -3793,9 +3794,12 @@ public class TestHRegion {
boolean previousEmpty = res.isEmpty();
res.clear();
- InternalScanner scanner = region.getScanner(scan);
- while (scanner.next(res))
- ;
+ try (InternalScanner scanner = region.getScanner(scan)) {
+ boolean moreRows;
+ do {
+ moreRows = scanner.next(res);
+ } while (moreRows);
+ }
if (!res.isEmpty() || !previousEmpty || i > compactInterval) {
assertEquals("i=" + i, expectedCount, res.size());
long timestamp = res.get(0).getTimestamp();
@@ -3891,7 +3895,7 @@ public class TestHRegion {
region.put(put);
numPutsFinished++;
if (numPutsFinished > 0 && numPutsFinished % 47 == 0) {
- System.out.println("put iteration = " + numPutsFinished);
+ LOG.debug("put iteration = {}", numPutsFinished);
Delete delete = new Delete(row, (long) numPutsFinished - 30);
region.delete(delete);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
index dfe52d0..58f62e3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
@@ -27,25 +27,18 @@ import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
import org.apache.hadoop.hbase.wal.WAL;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* A test similar to TestHRegion, but with in-memory flush families.
* Also checks wal truncation after in-memory compaction.
*/
@Category({VerySlowRegionServerTests.class, LargeTests.class})
-@SuppressWarnings("deprecation")
public class TestHRegionWithInMemoryFlush extends TestHRegion{
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHRegionWithInMemoryFlush.class);
- // Do not spin up clusters in here. If you need to spin up a cluster, do it
- // over in TestHRegionOnCluster.
- private static final Logger LOG = LoggerFactory.getLogger(TestHRegionWithInMemoryFlush.class);
-
/**
* @return A region on which you must call
* {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
index 8b96fa7..e5006ea 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Scan;
@@ -36,7 +37,6 @@ import org.apache.hadoop.hbase.client.TestIncrementsFromClientSide;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.wal.WAL;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
@@ -81,12 +81,12 @@ public class TestRegionIncrement {
}
private HRegion getRegion(final Configuration conf, final String tableName) throws IOException {
- WAL wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDataTestDir(),
- TEST_UTIL.getDataTestDir().toString(), conf);
+ FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDataTestDir(),
+ TEST_UTIL.getDataTestDir().toString(), conf);
+ wal.init();
ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
- return (HRegion)TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
- HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, tableName, conf,
- false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
+ return TEST_UTIL.createLocalHRegion(TableName.valueOf(tableName), HConstants.EMPTY_BYTE_ARRAY,
+ HConstants.EMPTY_BYTE_ARRAY, false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
}
private void closeRegion(final HRegion region) throws IOException {
@@ -170,8 +170,6 @@ public class TestRegionIncrement {
/**
* Have each thread update its own Cell. Avoid contention with another thread.
- * @throws IOException
- * @throws InterruptedException
*/
@Test
public void testUnContendedSingleCellIncrement()
@@ -209,13 +207,9 @@ public class TestRegionIncrement {
/**
* Have each thread update its own Cell. Avoid contention with another thread.
- * This is
- * @throws IOException
- * @throws InterruptedException
*/
@Test
- public void testContendedAcrossCellsIncrement()
- throws IOException, InterruptedException {
+ public void testContendedAcrossCellsIncrement() throws IOException, InterruptedException {
final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
long startTime = System.currentTimeMillis();
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index ca65914..8913343 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -214,6 +214,7 @@ public class TestWALLockup {
FileSystem fs = FileSystem.get(CONF);
Path rootDir = new Path(dir + getName());
DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF);
+ dodgyWAL.init();
Path originalWAL = dodgyWAL.getCurrentFileName();
// I need a log roller running.
LogRoller logRoller = new LogRoller(server, services);
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index b1e304e..7600fe9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -1097,6 +1097,7 @@ public abstract class AbstractTestWALReplay {
private MockWAL createMockWAL() throws IOException {
MockWAL wal = new MockWAL(fs, hbaseRootDir, logName, conf);
+ wal.init();
// Set down maximum recovery so we dfsclient doesn't linger retrying something
// long gone.
HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java
index ecd8e6c..49633cb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
@@ -41,7 +42,7 @@ import org.apache.hadoop.hbase.wal.WALProvider;
/**
* Helper class for testing protobuf log.
*/
-final class ProtobufLogTestHelper {
+public final class ProtobufLogTestHelper {
private ProtobufLogTestHelper() {
}
@@ -54,17 +55,22 @@ final class ProtobufLogTestHelper {
return RegionInfoBuilder.newBuilder(tableName).setRegionId(1024).build();
}
+ private static WAL.Entry generateEdit(int i, RegionInfo hri, TableName tableName, byte[] row,
+ int columnCount, long timestamp, MultiVersionConcurrencyControl mvcc) {
+ WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, i, timestamp,
+ HConstants.DEFAULT_CLUSTER_ID, mvcc);
+ WALEdit edit = new WALEdit();
+ int prefix = i;
+ IntStream.range(0, columnCount).mapToObj(j -> toValue(prefix, j))
+ .map(value -> new KeyValue(row, row, row, timestamp, value)).forEachOrdered(edit::add);
+ return new WAL.Entry(key, edit);
+ }
+
public static void doWrite(WALProvider.Writer writer, boolean withTrailer, TableName tableName,
int columnCount, int recordCount, byte[] row, long timestamp) throws IOException {
RegionInfo hri = toRegionInfo(tableName);
for (int i = 0; i < recordCount; i++) {
- WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, i, timestamp,
- HConstants.DEFAULT_CLUSTER_ID);
- WALEdit edit = new WALEdit();
- int prefix = i;
- IntStream.range(0, columnCount).mapToObj(j -> toValue(prefix, j))
- .map(value -> new KeyValue(row, row, row, timestamp, value)).forEachOrdered(edit::add);
- writer.append(new WAL.Entry(key, edit));
+ writer.append(generateEdit(i, hri, tableName, row, columnCount, timestamp, null));
}
writer.sync();
if (withTrailer) {
@@ -72,14 +78,24 @@ final class ProtobufLogTestHelper {
}
}
- public static void doRead(ProtobufLogReader reader, boolean withTrailer, TableName tableName,
- int columnCount, int recordCount, byte[] row, long timestamp) throws IOException {
+ public static void doWrite(WAL wal, RegionInfo hri, TableName tableName, int columnCount,
+ int recordCount, byte[] row, long timestamp, MultiVersionConcurrencyControl mvcc)
+ throws IOException {
+ for (int i = 0; i < recordCount; i++) {
+ WAL.Entry entry = generateEdit(i, hri, tableName, row, columnCount, timestamp, mvcc);
+ wal.append(hri, entry.getKey(), entry.getEdit(), true);
+ }
+ wal.sync();
+ }
+
+ public static void doRead(ProtobufLogReader reader, boolean withTrailer, RegionInfo hri,
+ TableName tableName, int columnCount, int recordCount, byte[] row, long timestamp)
+ throws IOException {
if (withTrailer) {
assertNotNull(reader.trailer);
} else {
assertNull(reader.trailer);
}
- RegionInfo hri = toRegionInfo(tableName);
for (int i = 0; i < recordCount; ++i) {
WAL.Entry entry = reader.next();
assertNotNull(entry);
@@ -96,4 +112,10 @@ final class ProtobufLogTestHelper {
}
assertNull(reader.next());
}
+
+ public static void doRead(ProtobufLogReader reader, boolean withTrailer, TableName tableName,
+ int columnCount, int recordCount, byte[] row, long timestamp) throws IOException {
+ doRead(reader, withTrailer, toRegionInfo(tableName), tableName, columnCount, recordCount, row,
+ timestamp);
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
index 450c01b..5f0f77c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
@@ -67,8 +67,10 @@ public class TestAsyncFSWAL extends AbstractTestFSWAL {
protected AbstractFSWAL<?> newWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir,
Configuration conf, List<WALActionsListener> listeners, boolean failIfWALExists,
String prefix, String suffix) throws IOException {
- return new AsyncFSWAL(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix,
- suffix, GROUP, CHANNEL_CLASS);
+ AsyncFSWAL wal = new AsyncFSWAL(fs, rootDir, logDir, archiveDir, conf, listeners,
+ failIfWALExists, prefix, suffix, GROUP, CHANNEL_CLASS);
+ wal.init();
+ return wal;
}
@Override
@@ -76,15 +78,16 @@ public class TestAsyncFSWAL extends AbstractTestFSWAL {
String archiveDir, Configuration conf, List<WALActionsListener> listeners,
boolean failIfWALExists, String prefix, String suffix, final Runnable action)
throws IOException {
- return new AsyncFSWAL(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix,
- suffix, GROUP, CHANNEL_CLASS) {
+ AsyncFSWAL wal = new AsyncFSWAL(fs, rootDir, logDir, archiveDir, conf, listeners,
+ failIfWALExists, prefix, suffix, GROUP, CHANNEL_CLASS) {
@Override
void atHeadOfRingBufferEventHandlerAppend() {
action.run();
super.atHeadOfRingBufferEventHandlerAppend();
}
-
};
+ wal.init();
+ return wal;
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
index 80b7477..0740954 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
@@ -66,7 +66,9 @@ public class TestAsyncWALReplay extends AbstractTestWALReplay {
@Override
protected WAL createWAL(Configuration c, Path hbaseRootDir, String logName) throws IOException {
- return new AsyncFSWAL(FileSystem.get(c), hbaseRootDir, logName,
+ AsyncFSWAL wal = new AsyncFSWAL(FileSystem.get(c), hbaseRootDir, logName,
HConstants.HREGION_OLDLOGDIR_NAME, c, null, true, null, null, GROUP, CHANNEL_CLASS);
+ wal.init();
+ return wal;
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
index d74f9d8..36dbe0f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
@@ -77,8 +77,7 @@ public class TestCombinedAsyncWriter {
CHANNEL_CLASS = NioSocketChannel.class;
UTIL.startMiniDFSCluster(3);
UTIL.getTestFileSystem().mkdirs(UTIL.getDataTestDirOnTestFS());
- WALS =
- new WALFactory(UTIL.getConfiguration(), null, TestCombinedAsyncWriter.class.getSimpleName());
+ WALS = new WALFactory(UTIL.getConfiguration(), TestCombinedAsyncWriter.class.getSimpleName());
}
@AfterClass
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
index 93ea2b8..b0937f7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
@@ -74,8 +74,10 @@ public class TestFSHLog extends AbstractTestFSWAL {
protected AbstractFSWAL<?> newWAL(FileSystem fs, Path rootDir, String walDir, String archiveDir,
Configuration conf, List<WALActionsListener> listeners, boolean failIfWALExists,
String prefix, String suffix) throws IOException {
- return new FSHLog(fs, rootDir, walDir, archiveDir, conf, listeners, failIfWALExists, prefix,
- suffix);
+ FSHLog wal =
+ new FSHLog(fs, rootDir, walDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix);
+ wal.init();
+ return wal;
}
@Override
@@ -83,8 +85,8 @@ public class TestFSHLog extends AbstractTestFSWAL {
String archiveDir, Configuration conf, List<WALActionsListener> listeners,
boolean failIfWALExists, String prefix, String suffix, final Runnable action)
throws IOException {
- return new FSHLog(fs, rootDir, walDir, archiveDir, conf, listeners, failIfWALExists, prefix,
- suffix) {
+ FSHLog wal = new FSHLog(fs, rootDir, walDir, archiveDir, conf, listeners, failIfWALExists,
+ prefix, suffix) {
@Override
void atHeadOfRingBufferEventHandlerAppend() {
@@ -92,6 +94,8 @@ public class TestFSHLog extends AbstractTestFSWAL {
super.atHeadOfRingBufferEventHandlerAppend();
}
};
+ wal.init();
+ return wal;
}
@Test
@@ -100,6 +104,7 @@ public class TestFSHLog extends AbstractTestFSWAL {
final String name = this.name.getMethodName();
FSHLog log = new FSHLog(FS, FSUtils.getRootDir(CONF), name, HConstants.HREGION_OLDLOGDIR_NAME,
CONF, null, true, null, null);
+ log.init();
try {
Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler");
ringBufferEventHandlerField.setAccessible(true);
@@ -142,7 +147,7 @@ public class TestFSHLog extends AbstractTestFSWAL {
try (FSHLog log =
new FSHLog(FS, FSUtils.getRootDir(CONF), name, HConstants.HREGION_OLDLOGDIR_NAME, CONF,
null, true, null, null)) {
-
+ log.init();
log.registerWALActionsListener(new WALActionsListener() {
@Override
public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit)
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index 649e981..66e19a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -48,6 +48,7 @@ public class TestWALReplay extends AbstractTestWALReplay {
@Override
protected WAL createWAL(Configuration c, Path hbaseRootDir, String logName) throws IOException {
FSHLog wal = new FSHLog(FileSystem.get(c), hbaseRootDir, logName, c);
+ wal.init();
// Set down maximum recovery so we dfsclient doesn't linger retrying something
// long gone.
HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
index 3928d9c..f996ce0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
@@ -104,8 +104,6 @@ public class IOTestProvider implements WALProvider {
this.factory = factory;
this.conf = conf;
this.providerId = providerId != null ? providerId : DEFAULT_PROVIDER_ID;
-
-
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/1851e445/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSynchronousReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSynchronousReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSynchronousReplicationWALProvider.java
new file mode 100644
index 0000000..e6031c6
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSynchronousReplicationWALProvider.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.wal;
+
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+
+import java.io.IOException;
+import java.util.Optional;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
+import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL;
+import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
+import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogTestHelper;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ RegionServerTests.class, MediumTests.class })
+public class TestSynchronousReplicationWALProvider {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static String PEER_ID = "1";
+
+ private static String REMOTE_WAL_DIR = "/RemoteWAL";
+
+ private static TableName TABLE = TableName.valueOf("table");
+
+ private static TableName TABLE_NO_REP = TableName.valueOf("table-no-rep");
+
+ private static RegionInfo REGION = RegionInfoBuilder.newBuilder(TABLE).build();
+
+ private static RegionInfo REGION_NO_REP = RegionInfoBuilder.newBuilder(TABLE_NO_REP).build();
+
+ private static WALFactory FACTORY;
+
+ private static Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info) {
+ if (info.getTable().equals(TABLE)) {
+ return Optional.of(Pair.newPair(PEER_ID, REMOTE_WAL_DIR));
+ } else {
+ return Optional.empty();
+ }
+ }
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ UTIL.startMiniDFSCluster(3);
+ FACTORY = new WALFactory(UTIL.getConfiguration(), "test",
+ TestSynchronousReplicationWALProvider::getPeerIdAndRemoteWALDir);
+ UTIL.getTestFileSystem().mkdirs(new Path(REMOTE_WAL_DIR, PEER_ID));
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws IOException {
+ FACTORY.close();
+ UTIL.shutdownMiniDFSCluster();
+ }
+
+ private void testReadWrite(DualAsyncFSWAL wal) throws Exception {
+ int recordCount = 100;
+ int columnCount = 10;
+ byte[] row = Bytes.toBytes("testRow");
+ long timestamp = System.currentTimeMillis();
+ MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
+ ProtobufLogTestHelper.doWrite(wal, REGION, TABLE, columnCount, recordCount, row, timestamp,
+ mvcc);
+ Path localFile = wal.getCurrentFileName();
+ Path remoteFile = new Path(REMOTE_WAL_DIR + "/" + PEER_ID, localFile.getName());
+ try (ProtobufLogReader reader =
+ (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) {
+ ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row,
+ timestamp);
+ }
+ try (ProtobufLogReader reader =
+ (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) {
+ ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row,
+ timestamp);
+ }
+ wal.rollWriter();
+ DistributedFileSystem dfs = (DistributedFileSystem) UTIL.getDFSCluster().getFileSystem();
+ UTIL.waitFor(5000, new ExplainingPredicate<Exception>() {
+
+ @Override
+ public boolean evaluate() throws Exception {
+ return dfs.isFileClosed(localFile) && dfs.isFileClosed(remoteFile);
+ }
+
+ @Override
+ public String explainFailure() throws Exception {
+ StringBuilder sb = new StringBuilder();
+ if (!dfs.isFileClosed(localFile)) {
+ sb.append(localFile + " has not been closed yet.");
+ }
+ if (!dfs.isFileClosed(remoteFile)) {
+ sb.append(remoteFile + " has not been closed yet.");
+ }
+ return sb.toString();
+ }
+ });
+ try (ProtobufLogReader reader =
+ (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) {
+ ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row,
+ timestamp);
+ }
+ try (ProtobufLogReader reader =
+ (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) {
+ ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row,
+ timestamp);
+ }
+ }
+
+ @Test
+ public void test() throws Exception {
+ WAL walNoRep = FACTORY.getWAL(REGION_NO_REP);
+ assertThat(walNoRep, not(instanceOf(DualAsyncFSWAL.class)));
+ DualAsyncFSWAL wal = (DualAsyncFSWAL) FACTORY.getWAL(REGION);
+ assertEquals(2, FACTORY.getWALs().size());
+ testReadWrite(wal);
+ SynchronousReplicationWALProvider walProvider =
+ (SynchronousReplicationWALProvider) FACTORY.getWALProvider();
+ walProvider.peerRemoved(PEER_ID);
+ assertEquals(1, FACTORY.getWALs().size());
+ }
+}
[04/11] hbase git commit: HBASE-19957 General framework to transit
sync replication state
Posted by zh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 85b2e85..7b8c43b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationListener;
import org.apache.hadoop.hbase.replication.ReplicationPeer;
import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@@ -136,6 +137,8 @@ public class ReplicationSourceManager implements ReplicationListener {
// For recovered source, the queue id's format is peer_id-servername-*
private final ConcurrentMap<String, Map<String, SortedSet<String>>> walsByIdRecoveredQueues;
+ private final SyncReplicationPeerMappingManager syncReplicationPeerMappingManager;
+
private final Configuration conf;
private final FileSystem fs;
// The paths to the latest log of each wal group, for new coming peers
@@ -172,9 +175,8 @@ public class ReplicationSourceManager implements ReplicationListener {
public ReplicationSourceManager(ReplicationQueueStorage queueStorage,
ReplicationPeers replicationPeers, ReplicationTracker replicationTracker, Configuration conf,
Server server, FileSystem fs, Path logDir, Path oldLogDir, UUID clusterId,
- WALFileLengthProvider walFileLengthProvider) throws IOException {
- // CopyOnWriteArrayList is thread-safe.
- // Generally, reading is more than modifying.
+ WALFileLengthProvider walFileLengthProvider,
+ SyncReplicationPeerMappingManager syncReplicationPeerMappingManager) throws IOException {
this.sources = new ConcurrentHashMap<>();
this.queueStorage = queueStorage;
this.replicationPeers = replicationPeers;
@@ -187,10 +189,11 @@ public class ReplicationSourceManager implements ReplicationListener {
this.fs = fs;
this.logDir = logDir;
this.oldLogDir = oldLogDir;
- this.sleepBeforeFailover = conf.getLong("replication.sleep.before.failover", 30000); // 30
- // seconds
+ // 30 seconds
+ this.sleepBeforeFailover = conf.getLong("replication.sleep.before.failover", 30000);
this.clusterId = clusterId;
this.walFileLengthProvider = walFileLengthProvider;
+ this.syncReplicationPeerMappingManager = syncReplicationPeerMappingManager;
this.replicationTracker.registerListener(this);
// It's preferable to failover 1 RS at a time, but with good zk servers
// more could be processed at the same time.
@@ -254,8 +257,11 @@ public class ReplicationSourceManager implements ReplicationListener {
}
/**
- * 1. Add peer to replicationPeers 2. Add the normal source and related replication queue 3. Add
- * HFile Refs
+ * <ol>
+ * <li>Add peer to replicationPeers</li>
+ * <li>Add the normal source and related replication queue</li>
+ * <li>Add HFile Refs</li>
+ * </ol>
* @param peerId the id of replication peer
*/
public void addPeer(String peerId) throws IOException {
@@ -274,13 +280,16 @@ public class ReplicationSourceManager implements ReplicationListener {
}
/**
- * 1. Remove peer for replicationPeers 2. Remove all the recovered sources for the specified id
- * and related replication queues 3. Remove the normal source and related replication queue 4.
- * Remove HFile Refs
+ * <ol>
+ * <li>Remove peer for replicationPeers</li>
+ * <li>Remove all the recovered sources for the specified id and related replication queues</li>
+ * <li>Remove the normal source and related replication queue</li>
+ * <li>Remove HFile Refs</li>
+ * </ol>
* @param peerId the id of the replication peer
*/
public void removePeer(String peerId) {
- replicationPeers.removePeer(peerId);
+ ReplicationPeer peer = replicationPeers.removePeer(peerId);
String terminateMessage = "Replication stream was removed by a user";
List<ReplicationSourceInterface> oldSourcesToDelete = new ArrayList<>();
// synchronized on oldsources to avoid adding recovered source for the to-be-removed peer
@@ -311,7 +320,10 @@ public class ReplicationSourceManager implements ReplicationListener {
deleteQueue(peerId);
this.walsById.remove(peerId);
}
-
+ ReplicationPeerConfig peerConfig = peer.getPeerConfig();
+ if (peerConfig.isSyncReplication()) {
+ syncReplicationPeerMappingManager.remove(peerId, peerConfig);
+ }
// Remove HFile Refs
abortWhenFail(() -> this.queueStorage.removePeerFromHFileRefs(peerId));
}
@@ -363,6 +375,10 @@ public class ReplicationSourceManager implements ReplicationListener {
}
}
}
+ ReplicationPeerConfig peerConfig = peer.getPeerConfig();
+ if (peerConfig.isSyncReplication()) {
+ syncReplicationPeerMappingManager.add(peer.getId(), peerConfig);
+ }
src.startup();
return src;
}
@@ -442,6 +458,7 @@ public class ReplicationSourceManager implements ReplicationListener {
// Delete queue from storage and memory
deleteQueue(src.getQueueId());
this.walsById.remove(src.getQueueId());
+
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
new file mode 100644
index 0000000..92f2c52
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Get the information for a sync replication peer.
+ */
+@InterfaceAudience.Private
+public interface SyncReplicationPeerInfoProvider {
+
+ /**
+ * Return the peer id and remote WAL directory if the region is synchronously replicated and the
+ * state is {@link SyncReplicationState#ACTIVE}.
+ */
+ Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info);
+
+ /**
+ * Check whether the given region is contained in a sync replication peer which is in the given
+ * state.
+ */
+ boolean isInState(RegionInfo info, SyncReplicationState state);
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
new file mode 100644
index 0000000..32159e6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+class SyncReplicationPeerInfoProviderImpl implements SyncReplicationPeerInfoProvider {
+
+ private final ReplicationPeers replicationPeers;
+
+ private final SyncReplicationPeerMappingManager mapping;
+
+ SyncReplicationPeerInfoProviderImpl(ReplicationPeers replicationPeers,
+ SyncReplicationPeerMappingManager mapping) {
+ this.replicationPeers = replicationPeers;
+ this.mapping = mapping;
+ }
+
+ @Override
+ public Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info) {
+ String peerId = mapping.getPeerId(info);
+ if (peerId == null) {
+ return Optional.empty();
+ }
+ ReplicationPeer peer = replicationPeers.getPeer(peerId);
+ if (peer == null) {
+ return Optional.empty();
+ }
+ if (peer.getSyncReplicationState() == SyncReplicationState.ACTIVE) {
+ return Optional.of(Pair.newPair(peerId, peer.getPeerConfig().getRemoteWALDir()));
+ } else {
+ return Optional.empty();
+ }
+ }
+
+ @Override
+ public boolean isInState(RegionInfo info, SyncReplicationState state) {
+ String peerId = mapping.getPeerId(info);
+ if (peerId == null) {
+ return false;
+ }
+ ReplicationPeer peer = replicationPeers.getPeer(peerId);
+ if (peer == null) {
+ return false;
+ }
+ return peer.getSyncReplicationState() == state;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java
new file mode 100644
index 0000000..64216cb
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Used to map region to sync replication peer id.
+ * <p>
+ * TODO: currently only the table include option is supported.
+ */
+@InterfaceAudience.Private
+class SyncReplicationPeerMappingManager {
+
+ private final ConcurrentMap<TableName, String> table2PeerId = new ConcurrentHashMap<>();
+
+ void add(String peerId, ReplicationPeerConfig peerConfig) {
+ peerConfig.getTableCFsMap().keySet().forEach(tn -> table2PeerId.put(tn, peerId));
+ }
+
+ void remove(String peerId, ReplicationPeerConfig peerConfig) {
+ peerConfig.getTableCFsMap().keySet().forEach(table2PeerId::remove);
+ }
+
+ String getPeerId(RegionInfo info) {
+ return table2PeerId.get(info.getTable());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerProvider.java
deleted file mode 100644
index b97bf7e..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication.regionserver;
-
-import java.util.Optional;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Get the peer id and remote root dir if the region is synchronously replicated.
- */
-@InterfaceAudience.Private
-public interface SyncReplicationPeerProvider {
-
- /**
- * Return the peer id and remote WAL directory if the region is synchronously replicated.
- */
- Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info);
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index bccc842..e3de6b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.regionserver.PeerActionListener;
-import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerProvider;
+import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerInfoProvider;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
@@ -67,7 +67,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen
private final WALProvider provider;
- private final SyncReplicationPeerProvider peerProvider;
+ private SyncReplicationPeerInfoProvider peerInfoProvider;
private WALFactory factory;
@@ -85,9 +85,12 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen
private final KeyLocker<String> createLock = new KeyLocker<>();
- SyncReplicationWALProvider(WALProvider provider, SyncReplicationPeerProvider peerProvider) {
+ SyncReplicationWALProvider(WALProvider provider) {
this.provider = provider;
- this.peerProvider = peerProvider;
+ }
+
+ public void setPeerInfoProvider(SyncReplicationPeerInfoProvider peerInfoProvider) {
+ this.peerInfoProvider = peerInfoProvider;
}
@Override
@@ -99,7 +102,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen
this.conf = conf;
this.factory = factory;
Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass =
- NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
+ NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
eventLoopGroup = eventLoopGroupAndChannelClass.getFirst();
channelClass = eventLoopGroupAndChannelClass.getSecond();
}
@@ -112,9 +115,9 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen
Path remoteWALDirPath = new Path(remoteWALDir);
FileSystem remoteFs = remoteWALDirPath.getFileSystem(conf);
return new DualAsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), remoteFs,
- CommonFSUtils.getWALRootDir(conf), new Path(remoteWALDirPath, peerId),
- getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId),
- conf, listeners, true, getLogPrefix(peerId), LOG_SUFFIX, eventLoopGroup, channelClass);
+ CommonFSUtils.getWALRootDir(conf), new Path(remoteWALDirPath, peerId),
+ getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId),
+ conf, listeners, true, getLogPrefix(peerId), LOG_SUFFIX, eventLoopGroup, channelClass);
}
private DualAsyncFSWAL getWAL(String peerId, String remoteWALDir) throws IOException {
@@ -139,7 +142,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen
@Override
public WAL getWAL(RegionInfo region) throws IOException {
Optional<Pair<String, String>> peerIdAndRemoteWALDir =
- peerProvider.getPeerIdAndRemoteWALDir(region);
+ peerInfoProvider.getPeerIdAndRemoteWALDir(region);
if (peerIdAndRemoteWALDir.isPresent()) {
Pair<String, String> pair = peerIdAndRemoteWALDir.get();
return getWAL(pair.getFirst(), pair.getSecond());
@@ -221,14 +224,12 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen
}
@Override
- public void peerRemoved(String peerId) {
- safeClose(peerId2WAL.remove(peerId));
- }
-
- @Override
public void peerSyncReplicationStateChange(String peerId, SyncReplicationState from,
- SyncReplicationState to) {
- assert to == SyncReplicationState.DOWNGRADE_ACTIVE;
- safeClose(peerId2WAL.remove(peerId));
+ SyncReplicationState to, int stage) {
+ // TODO: stage 0
+ if (from == SyncReplicationState.ACTIVE && to == SyncReplicationState.DOWNGRADE_ACTIVE &&
+ stage == 1) {
+ safeClose(peerId2WAL.remove(peerId));
+ }
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 06999ea..202b584 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -24,10 +24,10 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
-import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerProvider;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
@@ -143,18 +143,6 @@ public class WALFactory {
}
/**
- * instantiate a provider from a config property. requires conf to have already been set (as well
- * as anything the provider might need to read).
- */
- private WALProvider getProvider(String key, String defaultValue, String providerId)
- throws IOException {
- WALProvider provider = createProvider(getProviderClass(key, defaultValue));
- provider.init(this, conf, providerId);
- provider.addWALActionsListener(new MetricsWAL());
- return provider;
- }
-
- /**
* @param conf must not be null, will keep a reference to read params in later reader/writer
* instances.
* @param factoryId a unique identifier for this factory. used i.e. by filesystem implementations
@@ -171,7 +159,13 @@ public class WALFactory {
this.factoryId = factoryId;
// end required early initialization
if (conf.getBoolean("hbase.regionserver.hlog.enabled", true)) {
- provider = getProvider(WAL_PROVIDER, DEFAULT_WAL_PROVIDER, null);
+ WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER));
+ if (conf.getBoolean(HConstants.SYNC_REPLICATION_ENABLED, false)) {
+ provider = new SyncReplicationWALProvider(provider);
+ }
+ provider.init(this, conf, null);
+ provider.addWALActionsListener(new MetricsWAL());
+ this.provider = provider;
} else {
// special handling of existing configuration behavior.
LOG.warn("Running with WAL disabled.");
@@ -181,26 +175,6 @@ public class WALFactory {
}
/**
- * A temporary constructor for testing synchronous replication.
- * <p>
- * Remove it once we can integrate the synchronous replication logic in RS.
- */
- @VisibleForTesting
- WALFactory(Configuration conf, String factoryId, SyncReplicationPeerProvider peerProvider)
- throws IOException {
- timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000);
- /* TODO Both of these are probably specific to the fs wal provider */
- logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
- AbstractFSWALProvider.Reader.class);
- this.conf = conf;
- this.factoryId = factoryId;
- WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER));
- this.provider = new SyncReplicationWALProvider(provider, peerProvider);
- this.provider.init(this, conf, null);
- this.provider.addWALActionsListener(new MetricsWAL());
- }
-
- /**
* Shutdown all WALs and clean up any underlying storage.
* Use only when you will not need to replay and edits that have gone to any wals from this
* factory.
@@ -248,8 +222,9 @@ public class WALFactory {
if (provider != null) {
return provider;
}
- provider = getProvider(META_WAL_PROVIDER, DEFAULT_META_WAL_PROVIDER,
- AbstractFSWALProvider.META_WAL_PROVIDER_ID);
+ provider = createProvider(getProviderClass(META_WAL_PROVIDER, DEFAULT_META_WAL_PROVIDER));
+ provider.init(this, conf, AbstractFSWALProvider.META_WAL_PROVIDER_ID);
+ provider.addWALActionsListener(new MetricsWAL());
if (metaProvider.compareAndSet(null, provider)) {
return provider;
} else {
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index d462dbd..0ad476f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.replication.TestReplicationEndpoint.InterClusterR
import org.apache.hadoop.hbase.replication.regionserver.TestReplicator.ReplicationEndpointForTest;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -1008,7 +1009,7 @@ public class TestReplicationAdmin {
@Test
public void testTransitSyncReplicationPeerState() throws Exception {
TableName tableName = TableName.valueOf(name.getMethodName());
-
+ TEST_UTIL.createTable(tableName, Bytes.toBytes("family"));
ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
builder.setClusterKey(KEY_ONE);
builder.setReplicateAllUserTables(false);
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 3d7cdaf..9b03386 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -174,7 +174,10 @@ public abstract class TestReplicationSourceManager {
ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/sync-rep-state");
ZKUtil.setData(zkw, "/hbase/replication/peers/1/sync-rep-state",
- SyncReplicationState.toByteArray(SyncReplicationState.NONE));
+ ZKReplicationPeerStorage.NONE_STATE_ZNODE_BYTES);
+ ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/new-sync-rep-state");
+ ZKUtil.setData(zkw, "/hbase/replication/peers/1/new-sync-rep-state",
+ ZKReplicationPeerStorage.NONE_STATE_ZNODE_BYTES);
ZKUtil.createWithParents(zkw, "/hbase/replication/state");
ZKUtil.setData(zkw, "/hbase/replication/state", ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
index f09e51e..986228c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
@@ -27,6 +27,7 @@ import java.util.Optional;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -35,6 +36,8 @@ import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL;
import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogTestHelper;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerInfoProvider;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -51,7 +54,7 @@ public class TestSyncReplicationWALProvider {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestSyncReplicationWALProvider.class);
+ HBaseClassTestRule.forClass(TestSyncReplicationWALProvider.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
@@ -69,19 +72,30 @@ public class TestSyncReplicationWALProvider {
private static WALFactory FACTORY;
- private static Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info) {
- if (info.getTable().equals(TABLE)) {
- return Optional.of(Pair.newPair(PEER_ID, REMOTE_WAL_DIR));
- } else {
- return Optional.empty();
+ public static final class InfoProvider implements SyncReplicationPeerInfoProvider {
+
+ @Override
+ public Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info) {
+ if (info.getTable().equals(TABLE)) {
+ return Optional.of(Pair.newPair(PEER_ID, REMOTE_WAL_DIR));
+ } else {
+ return Optional.empty();
+ }
+ }
+
+ @Override
+ public boolean isInState(RegionInfo info, SyncReplicationState state) {
+ // TODO Implement SyncReplicationPeerInfoProvider.isInState
+ return false;
}
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
+ UTIL.getConfiguration().setBoolean(HConstants.SYNC_REPLICATION_ENABLED, true);
UTIL.startMiniDFSCluster(3);
- FACTORY = new WALFactory(UTIL.getConfiguration(), "test",
- TestSyncReplicationWALProvider::getPeerIdAndRemoteWALDir);
+ FACTORY = new WALFactory(UTIL.getConfiguration(), "test");
+ ((SyncReplicationWALProvider) FACTORY.getWALProvider()).setPeerInfoProvider(new InfoProvider());
UTIL.getTestFileSystem().mkdirs(new Path(REMOTE_WAL_DIR, PEER_ID));
}
@@ -151,9 +165,9 @@ public class TestSyncReplicationWALProvider {
DualAsyncFSWAL wal = (DualAsyncFSWAL) FACTORY.getWAL(REGION);
assertEquals(2, FACTORY.getWALs().size());
testReadWrite(wal);
- SyncReplicationWALProvider walProvider =
- (SyncReplicationWALProvider) FACTORY.getWALProvider();
- walProvider.peerRemoved(PEER_ID);
+ SyncReplicationWALProvider walProvider = (SyncReplicationWALProvider) FACTORY.getWALProvider();
+ walProvider.peerSyncReplicationStateChange(PEER_ID, SyncReplicationState.ACTIVE,
+ SyncReplicationState.DOWNGRADE_ACTIVE, 1);
assertEquals(1, FACTORY.getWALs().size());
}
}
[07/11] hbase git commit: HBASE-19864 Use protobuf instead of
enum.ordinal to store SyncReplicationState
Posted by zh...@apache.org.
HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState
Signed-off-by: zhangduo <zh...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2221b840
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2221b840
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2221b840
Branch: refs/heads/HBASE-19064
Commit: 2221b84073f8e593935d65ff3b8a90ba65038b34
Parents: 0a2661a
Author: Guanghao Zhang <zg...@apache.org>
Authored: Fri Jan 26 16:50:48 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Feb 10 17:49:13 2018 +0800
----------------------------------------------------------------------
.../replication/ReplicationPeerConfigUtil.java | 22 ++++++++++++++---
.../hbase/replication/SyncReplicationState.java | 17 +++++++++++++
.../hbase/shaded/protobuf/RequestConverter.java | 7 +++---
.../src/main/protobuf/Replication.proto | 13 ++++++----
.../replication/ZKReplicationPeerStorage.java | 25 +++++++++-----------
.../hadoop/hbase/master/MasterRpcServices.java | 9 ++++---
...ransitPeerSyncReplicationStateProcedure.java | 9 ++++---
.../TestReplicationSourceManager.java | 2 +-
8 files changed, 67 insertions(+), 37 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/2221b840/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 86b49ea..5096824 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -398,7 +398,7 @@ public final class ReplicationPeerConfigUtil {
ReplicationProtos.ReplicationState.State.ENABLED == desc.getState().getState();
ReplicationPeerConfig config = convert(desc.getConfig());
return new ReplicationPeerDescription(desc.getId(), enabled, config,
- SyncReplicationState.valueOf(desc.getSyncReplicationState().getNumber()));
+ toSyncReplicationState(desc.getSyncReplicationState()));
}
public static ReplicationProtos.ReplicationPeerDescription
@@ -406,17 +406,33 @@ public final class ReplicationPeerConfigUtil {
ReplicationProtos.ReplicationPeerDescription.Builder builder =
ReplicationProtos.ReplicationPeerDescription.newBuilder();
builder.setId(desc.getPeerId());
+
ReplicationProtos.ReplicationState.Builder stateBuilder =
ReplicationProtos.ReplicationState.newBuilder();
stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED :
ReplicationProtos.ReplicationState.State.DISABLED);
builder.setState(stateBuilder.build());
+
builder.setConfig(convert(desc.getPeerConfig()));
- builder.setSyncReplicationState(
- ReplicationProtos.SyncReplicationState.forNumber(desc.getSyncReplicationState().ordinal()));
+ builder.setSyncReplicationState(toSyncReplicationState(desc.getSyncReplicationState()));
+
return builder.build();
}
+ public static ReplicationProtos.SyncReplicationState
+ toSyncReplicationState(SyncReplicationState state) {
+ ReplicationProtos.SyncReplicationState.Builder syncReplicationStateBuilder =
+ ReplicationProtos.SyncReplicationState.newBuilder();
+ syncReplicationStateBuilder
+ .setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal()));
+ return syncReplicationStateBuilder.build();
+ }
+
+ public static SyncReplicationState
+ toSyncReplicationState(ReplicationProtos.SyncReplicationState state) {
+ return SyncReplicationState.valueOf(state.getState().getNumber());
+ }
+
public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig(
Map<TableName, List<String>> tableCfs, ReplicationPeerConfig peerConfig) {
ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig);
http://git-wip-us.apache.org/repos/asf/hbase/blob/2221b840/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
index bd144e9..a65b144 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
@@ -17,8 +17,15 @@
*/
package org.apache.hadoop.hbase.replication;
+import java.util.Arrays;
+import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+
/**
* Used by synchronous replication. Indicate the state of the current cluster in a synchronous
* replication peer. The state may be one of {@link SyncReplicationState#ACTIVE},
@@ -45,4 +52,14 @@ public enum SyncReplicationState {
throw new IllegalArgumentException("Unknown synchronous replication state " + value);
}
}
+
+ public static byte[] toByteArray(SyncReplicationState state) {
+ return ProtobufUtil
+ .prependPBMagic(ReplicationPeerConfigUtil.toSyncReplicationState(state).toByteArray());
+ }
+
+ public static SyncReplicationState parseFrom(byte[] bytes) throws InvalidProtocolBufferException {
+ return ReplicationPeerConfigUtil.toSyncReplicationState(ReplicationProtos.SyncReplicationState
+ .parseFrom(Arrays.copyOfRange(bytes, ProtobufUtil.lengthOfPBMagic(), bytes.length)));
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/2221b840/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 659be2a..6ebcc89 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -1878,10 +1878,9 @@ public final class RequestConverter {
}
public static TransitReplicationPeerSyncReplicationStateRequest
- buildTransitReplicationPeerSyncReplicationStateRequest(String peerId,
- SyncReplicationState state) {
+ buildTransitReplicationPeerSyncReplicationStateRequest(String peerId,
+ SyncReplicationState state) {
return TransitReplicationPeerSyncReplicationStateRequest.newBuilder().setPeerId(peerId)
- .setSyncReplicationState(ReplicationProtos.SyncReplicationState.forNumber(state.ordinal()))
- .build();
+ .setSyncReplicationState(ReplicationPeerConfigUtil.toSyncReplicationState(state)).build();
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/2221b840/hbase-protocol-shaded/src/main/protobuf/Replication.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
index de7b742..82a242d 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Replication.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -65,11 +65,14 @@ message ReplicationState {
/**
* Indicate the state of the current cluster in a synchronous replication peer.
*/
-enum SyncReplicationState {
- NONE = 0;
- ACTIVE = 1;
- DOWNGRADE_ACTIVE = 2;
- STANDBY = 3;
+message SyncReplicationState {
+ enum State {
+ NONE = 0;
+ ACTIVE = 1;
+ DOWNGRADE_ACTIVE = 2;
+ STANDBY = 3;
+ }
+ required State state = 1;
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/2221b840/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
index 338ce3f..909daa0 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
@@ -17,12 +17,12 @@
*/
package org.apache.hadoop.hbase.replication;
+import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
/**
@@ -96,7 +95,7 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
ZKUtilOp.createAndFailSilent(getPeerStateNode(peerId),
enabled ? ENABLED_ZNODE_BYTES : DISABLED_ZNODE_BYTES),
ZKUtilOp.createAndFailSilent(getSyncReplicationStateNode(peerId),
- Bytes.toBytes(syncReplicationState.ordinal())));
+ SyncReplicationState.toByteArray(syncReplicationState)));
try {
ZKUtil.createWithParents(zookeeper, peersZNode);
ZKUtil.multiOrSequential(zookeeper, multiOps, false);
@@ -179,29 +178,27 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
}
@Override
- public void setPeerSyncReplicationState(String peerId, SyncReplicationState clusterState)
+ public void setPeerSyncReplicationState(String peerId, SyncReplicationState state)
throws ReplicationException {
- byte[] clusterStateBytes = Bytes.toBytes(clusterState.ordinal());
try {
- ZKUtil.setData(zookeeper, getSyncReplicationStateNode(peerId), clusterStateBytes);
+ ZKUtil.setData(zookeeper, getSyncReplicationStateNode(peerId),
+ SyncReplicationState.toByteArray(state));
} catch (KeeperException e) {
throw new ReplicationException(
- "Unable to change the cluster state for the synchronous replication peer with id=" +
- peerId,
- e);
+ "Unable to change the cluster state for the synchronous replication peer with id=" + peerId,
+ e);
}
}
@Override
public SyncReplicationState getPeerSyncReplicationState(String peerId)
throws ReplicationException {
- byte[] data;
try {
- data = ZKUtil.getData(zookeeper, getSyncReplicationStateNode(peerId));
- } catch (KeeperException | InterruptedException e) {
+ byte[] data = ZKUtil.getData(zookeeper, getSyncReplicationStateNode(peerId));
+ return SyncReplicationState.parseFrom(data);
+ } catch (KeeperException | InterruptedException | IOException e) {
throw new ReplicationException(
- "Error getting cluster state for the synchronous replication peer with id=" + peerId, e);
+ "Error getting cluster state for the synchronous replication peer with id=" + peerId, e);
}
- return SyncReplicationState.valueOf(Bytes.toInt(data));
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/2221b840/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index f5bf117..e19d6ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -83,7 +83,6 @@ import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AccessController;
@@ -1962,13 +1961,13 @@ public class MasterRpcServices extends RSRpcServices
@Override
public TransitReplicationPeerSyncReplicationStateResponse
- transitReplicationPeerSyncReplicationState(RpcController controller,
- TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
+ transitReplicationPeerSyncReplicationState(RpcController controller,
+ TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
try {
long procId = master.transitReplicationPeerSyncReplicationState(request.getPeerId(),
- SyncReplicationState.valueOf(request.getSyncReplicationState().getNumber()));
+ ReplicationPeerConfigUtil.toSyncReplicationState(request.getSyncReplicationState()));
return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId)
- .build();
+ .build();
} catch (ReplicationException | IOException e) {
throw new ServiceException(e);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/2221b840/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index d26eecc..aad3b06 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.replication;
import java.io.IOException;
+import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
@@ -33,7 +34,6 @@ import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TransitPeerSyncReplicationStateStateData;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
/**
* The procedure for transit current cluster state for a synchronous replication peer.
@@ -89,16 +89,15 @@ public class TransitPeerSyncReplicationStateProcedure extends ModifyPeerProcedur
protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
super.serializeStateData(serializer);
serializer.serialize(TransitPeerSyncReplicationStateStateData.newBuilder()
- .setSyncReplicationState(ReplicationProtos.SyncReplicationState.forNumber(state.ordinal()))
- .build());
+ .setSyncReplicationState(ReplicationPeerConfigUtil.toSyncReplicationState(state)).build());
}
@Override
protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
super.deserializeStateData(serializer);
TransitPeerSyncReplicationStateStateData data =
- serializer.deserialize(TransitPeerSyncReplicationStateStateData.class);
- state = SyncReplicationState.valueOf(data.getSyncReplicationState().getNumber());
+ serializer.deserialize(TransitPeerSyncReplicationStateStateData.class);
+ state = ReplicationPeerConfigUtil.toSyncReplicationState(data.getSyncReplicationState());
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/2221b840/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 1204c9b..3d7cdaf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -174,7 +174,7 @@ public abstract class TestReplicationSourceManager {
ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/sync-rep-state");
ZKUtil.setData(zkw, "/hbase/replication/peers/1/sync-rep-state",
- Bytes.toBytes(SyncReplicationState.NONE.ordinal()));
+ SyncReplicationState.toByteArray(SyncReplicationState.NONE));
ZKUtil.createWithParents(zkw, "/hbase/replication/state");
ZKUtil.setData(zkw, "/hbase/replication/state", ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
[05/11] hbase git commit: HBASE-19957 General framework to transit
sync replication state
Posted by zh...@apache.org.
HBASE-19957 General framework to transit sync replication state
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/03057e19
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/03057e19
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/03057e19
Branch: refs/heads/HBASE-19064
Commit: 03057e19726eba73a43a8b321416043211db2b34
Parents: 419cb01
Author: zhangduo <zh...@apache.org>
Authored: Fri Feb 9 18:33:28 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Feb 10 17:49:13 2018 +0800
----------------------------------------------------------------------
.../replication/ReplicationPeerConfig.java | 2 -
.../replication/ReplicationPeerDescription.java | 5 +-
.../hbase/replication/SyncReplicationState.java | 19 +-
.../org/apache/hadoop/hbase/HConstants.java | 3 +
.../src/main/protobuf/MasterProcedure.proto | 20 ++-
.../hbase/replication/ReplicationPeerImpl.java | 45 ++++-
.../replication/ReplicationPeerStorage.java | 25 ++-
.../hbase/replication/ReplicationPeers.java | 27 ++-
.../replication/ZKReplicationPeerStorage.java | 65 +++++--
.../hbase/coprocessor/MasterObserver.java | 7 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 4 +-
.../hbase/master/MasterCoprocessorHost.java | 12 +-
.../replication/AbstractPeerProcedure.java | 14 +-
.../master/replication/ModifyPeerProcedure.java | 15 +-
.../replication/RefreshPeerProcedure.java | 18 +-
.../replication/ReplicationPeerManager.java | 107 +++++++-----
...ransitPeerSyncReplicationStateProcedure.java | 175 ++++++++++++-------
.../hbase/regionserver/HRegionServer.java | 35 ++--
.../regionserver/ReplicationSourceService.java | 11 +-
.../regionserver/PeerActionListener.java | 4 +-
.../regionserver/PeerProcedureHandler.java | 16 +-
.../regionserver/PeerProcedureHandlerImpl.java | 55 +++++-
.../regionserver/RefreshPeerCallable.java | 7 +
.../replication/regionserver/Replication.java | 22 ++-
.../regionserver/ReplicationSourceManager.java | 41 +++--
.../SyncReplicationPeerInfoProvider.java | 43 +++++
.../SyncReplicationPeerInfoProviderImpl.java | 71 ++++++++
.../SyncReplicationPeerMappingManager.java | 48 +++++
.../SyncReplicationPeerProvider.java | 35 ----
.../hbase/wal/SyncReplicationWALProvider.java | 35 ++--
.../org/apache/hadoop/hbase/wal/WALFactory.java | 47 ++---
.../replication/TestReplicationAdmin.java | 3 +-
.../TestReplicationSourceManager.java | 5 +-
.../wal/TestSyncReplicationWALProvider.java | 36 ++--
34 files changed, 752 insertions(+), 325 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 69565a7..79b3a1d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hbase.replication;
import java.util.Collection;
@@ -25,7 +24,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
-
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
index 2d077c5..b0c27bb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hbase.replication;
import org.apache.yetus.audience.InterfaceAudience;
/**
- * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
+ * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription.
+ * <p>
+ * Note to developers: we do not store the new sync replication state here since it is just an
+ * intermediate state and this class is public.
*/
@InterfaceAudience.Public
public class ReplicationPeerDescription {
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
index a65b144..de9576c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
@@ -29,14 +29,19 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
/**
* Used by synchronous replication. Indicate the state of the current cluster in a synchronous
* replication peer. The state may be one of {@link SyncReplicationState#ACTIVE},
- * {@link SyncReplicationState#DOWNGRADE_ACTIVE} or
- * {@link SyncReplicationState#STANDBY}.
+ * {@link SyncReplicationState#DOWNGRADE_ACTIVE} or {@link SyncReplicationState#STANDBY}.
* <p>
* For asynchronous replication, the state is {@link SyncReplicationState#NONE}.
*/
@InterfaceAudience.Public
public enum SyncReplicationState {
- NONE, ACTIVE, DOWNGRADE_ACTIVE, STANDBY;
+ NONE(0), ACTIVE(1), DOWNGRADE_ACTIVE(2), STANDBY(3);
+
+ private final byte value;
+
+ private SyncReplicationState(int value) {
+ this.value = (byte) value;
+ }
public static SyncReplicationState valueOf(int value) {
switch (value) {
@@ -53,13 +58,17 @@ public enum SyncReplicationState {
}
}
+ public int value() {
+ return value & 0xFF;
+ }
+
public static byte[] toByteArray(SyncReplicationState state) {
return ProtobufUtil
- .prependPBMagic(ReplicationPeerConfigUtil.toSyncReplicationState(state).toByteArray());
+ .prependPBMagic(ReplicationPeerConfigUtil.toSyncReplicationState(state).toByteArray());
}
public static SyncReplicationState parseFrom(byte[] bytes) throws InvalidProtocolBufferException {
return ReplicationPeerConfigUtil.toSyncReplicationState(ReplicationProtos.SyncReplicationState
- .parseFrom(Arrays.copyOfRange(bytes, ProtobufUtil.lengthOfPBMagic(), bytes.length)));
+ .parseFrom(Arrays.copyOfRange(bytes, ProtobufUtil.lengthOfPBMagic(), bytes.length)));
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1cd6f89..4664778 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1351,6 +1351,9 @@ public final class HConstants {
public static final String NOT_IMPLEMENTED = "Not implemented";
+ // TODO: need to find a better place to hold it.
+ public static final String SYNC_REPLICATION_ENABLED = "hbase.replication.sync.enabled";
+
private HConstants() {
// Can't be instantiated with this ctor.
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 1dffd33..8cc5c81 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -374,6 +374,17 @@ enum PeerModificationState {
POST_PEER_MODIFICATION = 4;
}
+enum PeerSyncReplicationStateTransitionState {
+ PRE_PEER_SYNC_REPLICATION_STATE_TRANSITION = 1;
+ SET_PEER_NEW_SYNC_REPLICATION_STATE = 2;
+ REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN = 3;
+ REPLAY_REMOTE_WAL_IN_PEER = 4;
+ REOPEN_ALL_REGIONS_IN_PEER = 5;
+ TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
+ REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
+ POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 8;
+}
+
message PeerModificationStateData {
required string peer_id = 1;
}
@@ -384,18 +395,23 @@ enum PeerModificationType {
ENABLE_PEER = 3;
DISABLE_PEER = 4;
UPDATE_PEER_CONFIG = 5;
+ TRANSIT_SYNC_REPLICATION_STATE = 6;
}
message RefreshPeerStateData {
required string peer_id = 1;
required PeerModificationType type = 2;
required ServerName target_server = 3;
+ /** We need multiple stages for sync replication state transition **/
+ optional uint32 stage = 4 [default = 0];
}
message RefreshPeerParameter {
required string peer_id = 1;
required PeerModificationType type = 2;
required ServerName target_server = 3;
+ /** We need multiple stages for sync replication state transition **/
+ optional uint32 stage = 4 [default = 0];
}
message PeerProcedureStateData {
@@ -412,5 +428,7 @@ message UpdatePeerConfigStateData {
}
message TransitPeerSyncReplicationStateStateData {
- required SyncReplicationState syncReplicationState = 1;
+ /** Could be null if we fail in pre check, so optional */
+ optional SyncReplicationState fromState = 1;
+ required SyncReplicationState toState = 2;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index ff3f662..22026e5 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -23,6 +23,7 @@ import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
@@ -36,7 +37,14 @@ public class ReplicationPeerImpl implements ReplicationPeer {
private volatile PeerState peerState;
- private volatile SyncReplicationState syncReplicationState;
+ // The lower 16 bits are the current sync replication state, the higher 16 bits are the new sync
+ // replication state. Embedded in one int so user can not get an inconsistency view of state and
+ // new state.
+ private volatile int syncReplicationStateBits;
+
+ private static final int SHIFT = 16;
+
+ private static final int AND_BITS = 0xFFFF;
private final List<ReplicationPeerConfigListener> peerConfigListeners;
@@ -48,12 +56,14 @@ public class ReplicationPeerImpl implements ReplicationPeer {
* @param peerConfig configuration for the replication peer
*/
public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig peerConfig,
- boolean peerState, SyncReplicationState syncReplicationState) {
+ boolean peerState, SyncReplicationState syncReplicationState,
+ SyncReplicationState newSyncReplicationState) {
this.conf = conf;
this.id = id;
this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED;
this.peerConfig = peerConfig;
- this.syncReplicationState = syncReplicationState;
+ this.syncReplicationStateBits =
+ syncReplicationState.value() | (newSyncReplicationState.value() << SHIFT);
this.peerConfigListeners = new ArrayList<>();
}
@@ -66,6 +76,16 @@ public class ReplicationPeerImpl implements ReplicationPeer {
peerConfigListeners.forEach(listener -> listener.peerConfigUpdated(peerConfig));
}
+ public void setNewSyncReplicationState(SyncReplicationState newState) {
+ this.syncReplicationStateBits =
+ (this.syncReplicationStateBits & AND_BITS) | (newState.value() << SHIFT);
+ }
+
+ public void transitSyncReplicationState() {
+ this.syncReplicationStateBits =
+ (this.syncReplicationStateBits >>> SHIFT) | (SyncReplicationState.NONE.value() << SHIFT);
+ }
+
/**
* Get the identifier of this peer
* @return string representation of the id (short)
@@ -80,9 +100,26 @@ public class ReplicationPeerImpl implements ReplicationPeer {
return peerState;
}
+ private static SyncReplicationState getSyncReplicationState(int bits) {
+ return SyncReplicationState.valueOf(bits & AND_BITS);
+ }
+
+ private static SyncReplicationState getNewSyncReplicationState(int bits) {
+ return SyncReplicationState.valueOf(bits >>> SHIFT);
+ }
+
+ public Pair<SyncReplicationState, SyncReplicationState> getSyncReplicationStateAndNewState() {
+ int bits = this.syncReplicationStateBits;
+ return Pair.newPair(getSyncReplicationState(bits), getNewSyncReplicationState(bits));
+ }
+
+ public SyncReplicationState getNewSyncReplicationState() {
+ return getNewSyncReplicationState(syncReplicationStateBits);
+ }
+
@Override
public SyncReplicationState getSyncReplicationState() {
- return syncReplicationState;
+ return getSyncReplicationState(syncReplicationStateBits);
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
index d2538ab..f74ac37 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.replication;
import java.util.List;
-
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -72,16 +71,30 @@ public interface ReplicationPeerStorage {
ReplicationPeerConfig getPeerConfig(String peerId) throws ReplicationException;
/**
- * Set the state of current cluster in a synchronous replication peer.
+ * Set the new sync replication state that we are going to transit to.
* @throws ReplicationException if there are errors accessing the storage service.
*/
- void setPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ void setPeerNewSyncReplicationState(String peerId, SyncReplicationState state)
throws ReplicationException;
/**
- * Get the state of current cluster in a synchronous replication peer.
+ * Overwrite the sync replication state with the new sync replication state which is set with the
+ * {@link #setPeerNewSyncReplicationState(String, SyncReplicationState)} method above, and clear
+ * the new sync replication state.
* @throws ReplicationException if there are errors accessing the storage service.
*/
- SyncReplicationState getPeerSyncReplicationState(String peerId)
- throws ReplicationException;
+ void transitPeerSyncReplicationState(String peerId) throws ReplicationException;
+
+ /**
+ * Get the sync replication state.
+ * @throws ReplicationException if there are errors accessing the storage service.
+ */
+ SyncReplicationState getPeerSyncReplicationState(String peerId) throws ReplicationException;
+
+ /**
+ * Get the new sync replication state. Will return {@link SyncReplicationState#NONE} if we are
+ * not in a transition.
+ * @throws ReplicationException if there are errors accessing the storage service.
+ */
+ SyncReplicationState getPeerNewSyncReplicationState(String peerId) throws ReplicationException;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index f120dbc..d2a5599 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -79,8 +79,8 @@ public class ReplicationPeers {
return true;
}
- public void removePeer(String peerId) {
- peerCache.remove(peerId);
+ public ReplicationPeerImpl removePeer(String peerId) {
+ return peerCache.remove(peerId);
}
/**
@@ -105,22 +105,29 @@ public class ReplicationPeers {
public PeerState refreshPeerState(String peerId) throws ReplicationException {
ReplicationPeerImpl peer = peerCache.get(peerId);
- if (peer == null) {
- throw new ReplicationException("Peer with id=" + peerId + " is not cached.");
- }
peer.setPeerState(peerStorage.isPeerEnabled(peerId));
return peer.getPeerState();
}
public ReplicationPeerConfig refreshPeerConfig(String peerId) throws ReplicationException {
ReplicationPeerImpl peer = peerCache.get(peerId);
- if (peer == null) {
- throw new ReplicationException("Peer with id=" + peerId + " is not cached.");
- }
peer.setPeerConfig(peerStorage.getPeerConfig(peerId));
return peer.getPeerConfig();
}
+ public SyncReplicationState refreshPeerNewSyncReplicationState(String peerId)
+ throws ReplicationException {
+ ReplicationPeerImpl peer = peerCache.get(peerId);
+ SyncReplicationState newState = peerStorage.getPeerNewSyncReplicationState(peerId);
+ peer.setNewSyncReplicationState(newState);
+ return newState;
+ }
+
+ public void transitPeerSyncReplicationState(String peerId) {
+ ReplicationPeerImpl peer = peerCache.get(peerId);
+ peer.transitSyncReplicationState();
+ }
+
/**
* Helper method to connect to a peer
* @param peerId peer's identifier
@@ -130,7 +137,9 @@ public class ReplicationPeers {
ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId);
boolean enabled = peerStorage.isPeerEnabled(peerId);
SyncReplicationState syncReplicationState = peerStorage.getPeerSyncReplicationState(peerId);
+ SyncReplicationState newSyncReplicationState =
+ peerStorage.getPeerNewSyncReplicationState(peerId);
return new ReplicationPeerImpl(ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf),
- peerId, peerConfig, enabled, syncReplicationState);
+ peerId, peerConfig, enabled, syncReplicationState, newSyncReplicationState);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
index 909daa0..9241ce3 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
@@ -30,7 +30,9 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
/**
@@ -51,7 +53,12 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
public static final byte[] DISABLED_ZNODE_BYTES =
toByteArray(ReplicationProtos.ReplicationState.State.DISABLED);
- public static final String SYNCHRONOUS_REPLICATION_STATE_ZNODE = "sync-rep-state";
+ public static final String SYNC_REPLICATION_STATE_ZNODE = "sync-rep-state";
+
+ public static final String NEW_SYNC_REPLICATION_STATE_ZNODE = "new-sync-rep-state";
+
+ public static final byte[] NONE_STATE_ZNODE_BYTES =
+ SyncReplicationState.toByteArray(SyncReplicationState.NONE);
/**
* The name of the znode that contains the replication status of a remote slave (i.e. peer)
@@ -83,7 +90,11 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
@VisibleForTesting
public String getSyncReplicationStateNode(String peerId) {
- return ZNodePaths.joinZNode(getPeerNode(peerId), SYNCHRONOUS_REPLICATION_STATE_ZNODE);
+ return ZNodePaths.joinZNode(getPeerNode(peerId), SYNC_REPLICATION_STATE_ZNODE);
+ }
+
+ private String getNewSyncReplicationStateNode(String peerId) {
+ return ZNodePaths.joinZNode(getPeerNode(peerId), NEW_SYNC_REPLICATION_STATE_ZNODE);
}
@Override
@@ -95,14 +106,15 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
ZKUtilOp.createAndFailSilent(getPeerStateNode(peerId),
enabled ? ENABLED_ZNODE_BYTES : DISABLED_ZNODE_BYTES),
ZKUtilOp.createAndFailSilent(getSyncReplicationStateNode(peerId),
- SyncReplicationState.toByteArray(syncReplicationState)));
+ SyncReplicationState.toByteArray(syncReplicationState)),
+ ZKUtilOp.createAndFailSilent(getNewSyncReplicationStateNode(peerId), NONE_STATE_ZNODE_BYTES));
try {
ZKUtil.createWithParents(zookeeper, peersZNode);
ZKUtil.multiOrSequential(zookeeper, multiOps, false);
} catch (KeeperException e) {
throw new ReplicationException(
"Could not add peer with id=" + peerId + ", peerConfig=>" + peerConfig + ", state=" +
- (enabled ? "ENABLED" : "DISABLED") + ", syncReplicationState=" + syncReplicationState,
+ (enabled ? "ENABLED" : "DISABLED") + ", syncReplicationState=" + syncReplicationState,
e);
}
}
@@ -134,7 +146,7 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
ReplicationPeerConfigUtil.toByteArray(peerConfig));
} catch (KeeperException e) {
throw new ReplicationException(
- "There was a problem trying to save changes to the " + "replication peer " + peerId, e);
+ "There was a problem trying to save changes to the " + "replication peer " + peerId, e);
}
}
@@ -167,38 +179,63 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
}
if (data == null || data.length == 0) {
throw new ReplicationException(
- "Replication peer config data shouldn't be empty, peerId=" + peerId);
+ "Replication peer config data shouldn't be empty, peerId=" + peerId);
}
try {
return ReplicationPeerConfigUtil.parsePeerFrom(data);
} catch (DeserializationException e) {
throw new ReplicationException(
- "Failed to parse replication peer config for peer with id=" + peerId, e);
+ "Failed to parse replication peer config for peer with id=" + peerId, e);
}
}
@Override
- public void setPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ public void setPeerNewSyncReplicationState(String peerId, SyncReplicationState state)
throws ReplicationException {
try {
- ZKUtil.setData(zookeeper, getSyncReplicationStateNode(peerId),
+ ZKUtil.createSetData(zookeeper, getNewSyncReplicationStateNode(peerId),
SyncReplicationState.toByteArray(state));
} catch (KeeperException e) {
throw new ReplicationException(
- "Unable to change the cluster state for the synchronous replication peer with id=" + peerId,
- e);
+ "Unable to set the new sync replication state for peer with id=" + peerId, e);
}
}
@Override
- public SyncReplicationState getPeerSyncReplicationState(String peerId)
+ public void transitPeerSyncReplicationState(String peerId) throws ReplicationException {
+ String newStateNode = getNewSyncReplicationStateNode(peerId);
+ try {
+ byte[] data = ZKUtil.getData(zookeeper, newStateNode);
+ ZKUtil.multiOrSequential(zookeeper,
+ Arrays.asList(ZKUtilOp.setData(newStateNode, NONE_STATE_ZNODE_BYTES),
+ ZKUtilOp.setData(getSyncReplicationStateNode(peerId), data)),
+ false);
+ } catch (KeeperException | InterruptedException e) {
+ throw new ReplicationException(
+ "Error transiting sync replication state for peer with id=" + peerId, e);
+ }
+ }
+
+ private SyncReplicationState getSyncReplicationState(String peerId, String path)
throws ReplicationException {
try {
- byte[] data = ZKUtil.getData(zookeeper, getSyncReplicationStateNode(peerId));
+ byte[] data = ZKUtil.getData(zookeeper, path);
return SyncReplicationState.parseFrom(data);
} catch (KeeperException | InterruptedException | IOException e) {
throw new ReplicationException(
- "Error getting cluster state for the synchronous replication peer with id=" + peerId, e);
+ "Error getting sync replication state of path " + path + " for peer with id=" + peerId, e);
}
}
+
+ @Override
+ public SyncReplicationState getPeerNewSyncReplicationState(String peerId)
+ throws ReplicationException {
+ return getSyncReplicationState(peerId, getNewSyncReplicationStateNode(peerId));
+ }
+
+ @Override
+ public SyncReplicationState getPeerSyncReplicationState(String peerId)
+ throws ReplicationException {
+ return getSyncReplicationState(peerId, getSyncReplicationStateNode(peerId));
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 8d2b55f..ba340cb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1236,7 +1236,7 @@ public interface MasterObserver {
* Called before transit current cluster state for the specified synchronous replication peer
* @param ctx the environment to interact with the framework and master
* @param peerId a short name that identifies the peer
- * @param state a new state
+ * @param state the new state
*/
default void preTransitReplicationPeerSyncReplicationState(
final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
@@ -1247,11 +1247,12 @@ public interface MasterObserver {
* Called after transit current cluster state for the specified synchronous replication peer
* @param ctx the environment to interact with the framework and master
* @param peerId a short name that identifies the peer
- * @param state a new state
+ * @param from the old state
+ * @param to the new state
*/
default void postTransitReplicationPeerSyncReplicationState(
final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
- SyncReplicationState state) throws IOException {
+ SyncReplicationState from, SyncReplicationState to) throws IOException {
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ddffbec..297b1f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -129,10 +129,10 @@ import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
+import org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-import org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure;
import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
import org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
@@ -3347,7 +3347,7 @@ public class HMaster extends HRegionServer implements MasterServices {
return favoredNodesManager;
}
- private long executePeerProcedure(ModifyPeerProcedure procedure) throws IOException {
+ private long executePeerProcedure(AbstractPeerProcedure<?> procedure) throws IOException {
long procId = procedureExecutor.submitProcedure(procedure);
procedure.getLatch().await();
return procId;
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index ba5e70a..bf60066 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -1525,22 +1525,22 @@ public class MasterCoprocessorHost
});
}
- public void preTransitReplicationPeerSyncReplicationState(final String peerId,
- final SyncReplicationState clusterState) throws IOException {
+ public void preTransitReplicationPeerSyncReplicationState(String peerId,
+ SyncReplicationState state) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
@Override
public void call(MasterObserver observer) throws IOException {
- observer.preTransitReplicationPeerSyncReplicationState(this, peerId, clusterState);
+ observer.preTransitReplicationPeerSyncReplicationState(this, peerId, state);
}
});
}
- public void postTransitReplicationPeerSyncReplicationState(final String peerId,
- final SyncReplicationState clusterState) throws IOException {
+ public void postTransitReplicationPeerSyncReplicationState(String peerId,
+ SyncReplicationState from, SyncReplicationState to) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
@Override
public void call(MasterObserver observer) throws IOException {
- observer.postTransitReplicationPeerSyncReplicationState(this, peerId, clusterState);
+ observer.postTransitReplicationPeerSyncReplicationState(this, peerId, from, to);
}
});
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 0ad8a63..6679d78 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -46,7 +46,7 @@ public abstract class AbstractPeerProcedure<TState>
protected AbstractPeerProcedure(String peerId) {
this.peerId = peerId;
- this.latch = ProcedurePrepareLatch.createLatch(2, 0);
+ this.latch = ProcedurePrepareLatch.createLatch(2, 1);
}
public ProcedurePrepareLatch getLatch() {
@@ -94,4 +94,16 @@ public abstract class AbstractPeerProcedure<TState>
super.deserializeStateData(serializer);
peerId = serializer.deserialize(PeerProcedureStateData.class).getPeerId();
}
+
+ @Override
+ protected void rollbackState(MasterProcedureEnv env, TState state)
+ throws IOException, InterruptedException {
+ if (state == getInitialState()) {
+ // actually the peer related operations have no rollback, but if we haven't done any
+ // modifications on the peer storage yet, we can just return.
+ return;
+ }
+ throw new UnsupportedOperationException();
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 83c5134..ac5d2af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -105,8 +105,8 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
return Flow.HAS_MORE_STATE;
case REFRESH_PEER_ON_RS:
addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
- .map(sn -> new RefreshPeerProcedure(peerId, getPeerOperationType(), sn))
- .toArray(RefreshPeerProcedure[]::new));
+ .map(sn -> new RefreshPeerProcedure(peerId, getPeerOperationType(), sn))
+ .toArray(RefreshPeerProcedure[]::new));
setNextState(PeerModificationState.POST_PEER_MODIFICATION);
return Flow.HAS_MORE_STATE;
case POST_PEER_MODIFICATION:
@@ -128,17 +128,6 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
}
@Override
- protected void rollbackState(MasterProcedureEnv env, PeerModificationState state)
- throws IOException, InterruptedException {
- if (state == PeerModificationState.PRE_PEER_MODIFICATION) {
- // actually the peer related operations has no rollback, but if we haven't done any
- // modifications on the peer storage yet, we can just return.
- return;
- }
- throw new UnsupportedOperationException();
- }
-
- @Override
protected PeerModificationState getState(int stateId) {
return PeerModificationState.forNumber(stateId);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
index 1253ef9..b6825a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
@@ -52,6 +52,8 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
private ServerName targetServer;
+ private int stage;
+
private boolean dispatched;
private ProcedureEvent<?> event;
@@ -62,9 +64,15 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
}
public RefreshPeerProcedure(String peerId, PeerOperationType type, ServerName targetServer) {
+ this(peerId, type, targetServer, 0);
+ }
+
+ public RefreshPeerProcedure(String peerId, PeerOperationType type, ServerName targetServer,
+ int stage) {
this.peerId = peerId;
this.type = type;
this.targetServer = targetServer;
+ this.stage = stage;
}
@Override
@@ -89,6 +97,8 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
return PeerModificationType.DISABLE_PEER;
case UPDATE_CONFIG:
return PeerModificationType.UPDATE_PEER_CONFIG;
+ case TRANSIT_SYNC_REPLICATION_STATE:
+ return PeerModificationType.TRANSIT_SYNC_REPLICATION_STATE;
default:
throw new IllegalArgumentException("Unknown type: " + type);
}
@@ -106,6 +116,8 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
return PeerOperationType.DISABLE;
case UPDATE_PEER_CONFIG:
return PeerOperationType.UPDATE_CONFIG;
+ case TRANSIT_SYNC_REPLICATION_STATE:
+ return PeerOperationType.TRANSIT_SYNC_REPLICATION_STATE;
default:
throw new IllegalArgumentException("Unknown type: " + type);
}
@@ -116,7 +128,8 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
assert targetServer.equals(remote);
return new ServerOperation(this, getProcId(), RefreshPeerCallable.class,
RefreshPeerParameter.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type))
- .setTargetServer(ProtobufUtil.toServerName(remote)).build().toByteArray());
+ .setTargetServer(ProtobufUtil.toServerName(remote)).setStage(stage).build()
+ .toByteArray());
}
private void complete(MasterProcedureEnv env, Throwable error) {
@@ -191,7 +204,7 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
serializer.serialize(
RefreshPeerStateData.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type))
- .setTargetServer(ProtobufUtil.toServerName(targetServer)).build());
+ .setTargetServer(ProtobufUtil.toServerName(targetServer)).setStage(stage).build());
}
@Override
@@ -200,5 +213,6 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
peerId = data.getPeerId();
type = toPeerOperationType(data.getType());
targetServer = ProtobufUtil.toServerName(data.getTargetServer());
+ stage = data.getStage();
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 6bfd9c9..d80e9a4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.master.replication;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.EnumMap;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
@@ -50,6 +49,9 @@ import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
+
/**
* Manages and performs all replication admin operations.
* <p>
@@ -64,15 +66,11 @@ public class ReplicationPeerManager {
private final ConcurrentMap<String, ReplicationPeerDescription> peers;
- private final EnumMap<SyncReplicationState, EnumSet<SyncReplicationState>> allowedTransition =
- new EnumMap<SyncReplicationState, EnumSet<SyncReplicationState>>(SyncReplicationState.class) {
- {
- put(SyncReplicationState.ACTIVE, EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE));
- put(SyncReplicationState.STANDBY, EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE));
- put(SyncReplicationState.DOWNGRADE_ACTIVE,
- EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE));
- }
- };
+ private final ImmutableMap<SyncReplicationState, EnumSet<SyncReplicationState>>
+ allowedTransition = Maps.immutableEnumMap(ImmutableMap.of(SyncReplicationState.ACTIVE,
+ EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), SyncReplicationState.STANDBY,
+ EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), SyncReplicationState.DOWNGRADE_ACTIVE,
+ EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE)));
ReplicationPeerManager(ReplicationPeerStorage peerStorage, ReplicationQueueStorage queueStorage,
ConcurrentMap<String, ReplicationPeerDescription> peers) {
@@ -147,44 +145,48 @@ public class ReplicationPeerManager {
ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig();
if (!isStringEquals(peerConfig.getClusterKey(), oldPeerConfig.getClusterKey())) {
throw new DoNotRetryIOException(
- "Changing the cluster key on an existing peer is not allowed. Existing key '" +
- oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does not match new key '" +
- peerConfig.getClusterKey() + "'");
+ "Changing the cluster key on an existing peer is not allowed. Existing key '" +
+ oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does not match new key '" +
+ peerConfig.getClusterKey() + "'");
}
if (!isStringEquals(peerConfig.getReplicationEndpointImpl(),
oldPeerConfig.getReplicationEndpointImpl())) {
throw new DoNotRetryIOException("Changing the replication endpoint implementation class " +
- "on an existing peer is not allowed. Existing class '" +
- oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId +
- " does not match new class '" + peerConfig.getReplicationEndpointImpl() + "'");
+ "on an existing peer is not allowed. Existing class '" +
+ oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId +
+ " does not match new class '" + peerConfig.getReplicationEndpointImpl() + "'");
}
if (!isStringEquals(peerConfig.getRemoteWALDir(), oldPeerConfig.getRemoteWALDir())) {
throw new DoNotRetryIOException(
- "Changing the remote wal dir on an existing peer is not allowed. Existing remote wal " +
- "dir '" + oldPeerConfig.getRemoteWALDir() + "' for peer " + peerId +
- " does not match new remote wal dir '" + peerConfig.getRemoteWALDir() + "'");
+ "Changing the remote wal dir on an existing peer is not allowed. Existing remote wal " +
+ "dir '" + oldPeerConfig.getRemoteWALDir() + "' for peer " + peerId +
+ " does not match new remote wal dir '" + peerConfig.getRemoteWALDir() + "'");
}
if (oldPeerConfig.isSyncReplication()) {
if (!ReplicationUtils.isKeyConfigEqual(oldPeerConfig, peerConfig)) {
throw new DoNotRetryIOException(
- "Changing the replicated namespace/table config on a synchronous replication "
- + "peer(peerId: " + peerId + ") is not allowed.");
+ "Changing the replicated namespace/table config on a synchronous replication " +
+ "peer(peerId: " + peerId + ") is not allowed.");
}
}
}
- public void preTransitPeerSyncReplicationState(String peerId, SyncReplicationState state)
- throws DoNotRetryIOException {
+ /**
+ * @return the old state.
+ */
+ public SyncReplicationState preTransitPeerSyncReplicationState(String peerId,
+ SyncReplicationState state) throws DoNotRetryIOException {
ReplicationPeerDescription desc = checkPeerExists(peerId);
SyncReplicationState fromState = desc.getSyncReplicationState();
EnumSet<SyncReplicationState> allowedToStates = allowedTransition.get(fromState);
if (allowedToStates == null || !allowedToStates.contains(state)) {
throw new DoNotRetryIOException("Can not transit current cluster state from " + fromState +
- " to " + state + " for peer id=" + peerId);
+ " to " + state + " for peer id=" + peerId);
}
+ return fromState;
}
public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
@@ -195,8 +197,8 @@ public class ReplicationPeerManager {
}
ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build();
SyncReplicationState syncReplicationState =
- copiedPeerConfig.isSyncReplication() ? SyncReplicationState.DOWNGRADE_ACTIVE
- : SyncReplicationState.NONE;
+ copiedPeerConfig.isSyncReplication() ? SyncReplicationState.DOWNGRADE_ACTIVE
+ : SyncReplicationState.NONE;
peerStorage.addPeer(peerId, copiedPeerConfig, enabled, syncReplicationState);
peers.put(peerId,
new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig, syncReplicationState));
@@ -236,7 +238,7 @@ public class ReplicationPeerManager {
ReplicationPeerDescription desc = peers.get(peerId);
ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig();
ReplicationPeerConfigBuilder newPeerConfigBuilder =
- ReplicationPeerConfig.newBuilder(peerConfig);
+ ReplicationPeerConfig.newBuilder(peerConfig);
// we need to use the new conf to overwrite the old one.
newPeerConfigBuilder.putAllConfiguration(oldPeerConfig.getConfiguration());
newPeerConfigBuilder.putAllConfiguration(peerConfig.getConfiguration());
@@ -253,7 +255,7 @@ public class ReplicationPeerManager {
return new ArrayList<>(peers.values());
}
return peers.values().stream().filter(r -> pattern.matcher(r.getPeerId()).matches())
- .collect(Collectors.toList());
+ .collect(Collectors.toList());
}
public Optional<ReplicationPeerConfig> getPeerConfig(String peerId) {
@@ -261,12 +263,23 @@ public class ReplicationPeerManager {
return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty();
}
- public void transitPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ public void setPeerNewSyncReplicationState(String peerId, SyncReplicationState state)
+ throws ReplicationException {
+ peerStorage.setPeerNewSyncReplicationState(peerId, state);
+ }
+
+ public void transitPeerSyncReplicationState(String peerId, SyncReplicationState newState)
throws ReplicationException {
+ if (peerStorage.getPeerNewSyncReplicationState(peerId) != SyncReplicationState.NONE) {
+ // Only transit if this is not a retry
+ peerStorage.transitPeerSyncReplicationState(peerId);
+ }
ReplicationPeerDescription desc = peers.get(peerId);
- peerStorage.setPeerSyncReplicationState(peerId, state);
- peers.put(peerId,
- new ReplicationPeerDescription(peerId, desc.isEnabled(), desc.getPeerConfig(), state));
+ if (desc.getSyncReplicationState() != newState) {
+ // Only recreate the desc if this is not a retry
+ peers.put(peerId,
+ new ReplicationPeerDescription(peerId, desc.isEnabled(), desc.getPeerConfig(), newState));
+ }
}
public void removeAllQueuesAndHFileRefs(String peerId) throws ReplicationException {
@@ -293,10 +306,10 @@ public class ReplicationPeerManager {
// If replicate_all flag is true, it means all user tables will be replicated to peer cluster.
// Then allow config exclude namespaces or exclude table-cfs which can't be replicated to peer
// cluster.
- if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty())
- || (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) {
- throw new DoNotRetryIOException("Need clean namespaces or table-cfs config firstly "
- + "when you want replicate all cluster");
+ if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) ||
+ (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) {
+ throw new DoNotRetryIOException("Need clean namespaces or table-cfs config firstly " +
+ "when you want replicate all cluster");
}
checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
peerConfig.getExcludeTableCFsMap());
@@ -304,13 +317,13 @@ public class ReplicationPeerManager {
// If replicate_all flag is false, it means all user tables can't be replicated to peer
// cluster. Then allow to config namespaces or table-cfs which will be replicated to peer
// cluster.
- if ((peerConfig.getExcludeNamespaces() != null
- && !peerConfig.getExcludeNamespaces().isEmpty())
- || (peerConfig.getExcludeTableCFsMap() != null
- && !peerConfig.getExcludeTableCFsMap().isEmpty())) {
+ if ((peerConfig.getExcludeNamespaces() != null &&
+ !peerConfig.getExcludeNamespaces().isEmpty()) ||
+ (peerConfig.getExcludeTableCFsMap() != null &&
+ !peerConfig.getExcludeTableCFsMap().isEmpty())) {
throw new DoNotRetryIOException(
- "Need clean exclude-namespaces or exclude-table-cfs config firstly"
- + " when replicate_all flag is false");
+ "Need clean exclude-namespaces or exclude-table-cfs config firstly" +
+ " when replicate_all flag is false");
}
checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
peerConfig.getTableCFsMap());
@@ -330,11 +343,11 @@ public class ReplicationPeerManager {
// TODO: Add namespace, replicat_all flag back
if (peerConfig.replicateAllUserTables()) {
throw new DoNotRetryIOException(
- "Only support replicated table config for sync replication peer");
+ "Only support replicated table config for sync replication peer");
}
if (peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) {
throw new DoNotRetryIOException(
- "Only support replicated table config for sync replication peer");
+ "Only support replicated table config for sync replication peer");
}
if (peerConfig.getTableCFsMap() == null || peerConfig.getTableCFsMap().isEmpty()) {
throw new DoNotRetryIOException("Need config replicated tables for sync replication peer");
@@ -342,7 +355,7 @@ public class ReplicationPeerManager {
for (List<String> cfs : peerConfig.getTableCFsMap().values()) {
if (cfs != null && !cfs.isEmpty()) {
throw new DoNotRetryIOException(
- "Only support replicated table config for sync replication peer");
+ "Only support replicated table config for sync replication peer");
}
}
}
@@ -386,7 +399,7 @@ public class ReplicationPeerManager {
private void checkConfiguredWALEntryFilters(ReplicationPeerConfig peerConfig)
throws DoNotRetryIOException {
String filterCSV = peerConfig.getConfiguration()
- .get(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY);
+ .get(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY);
if (filterCSV != null && !filterCSV.isEmpty()) {
String[] filters = filterCSV.split(",");
for (String filter : filters) {
@@ -411,7 +424,7 @@ public class ReplicationPeerManager {
public static ReplicationPeerManager create(ZKWatcher zk, Configuration conf)
throws ReplicationException {
ReplicationPeerStorage peerStorage =
- ReplicationStorageFactory.getReplicationPeerStorage(zk, conf);
+ ReplicationStorageFactory.getReplicationPeerStorage(zk, conf);
ConcurrentMap<String, ReplicationPeerDescription> peers = new ConcurrentHashMap<>();
for (String peerId : peerStorage.listPeerIds()) {
ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId);
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index aad3b06..c253bff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -18,11 +18,12 @@
package org.apache.hadoop.hbase.master.replication;
import java.io.IOException;
-
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
@@ -32,26 +33,29 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerSyncReplicationStateTransitionState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TransitPeerSyncReplicationStateStateData;
/**
- * The procedure for transit current cluster state for a synchronous replication peer.
+ * The procedure for transit current sync replication state for a synchronous replication peer.
*/
@InterfaceAudience.Private
-public class TransitPeerSyncReplicationStateProcedure extends ModifyPeerProcedure {
+public class TransitPeerSyncReplicationStateProcedure
+ extends AbstractPeerProcedure<PeerSyncReplicationStateTransitionState> {
private static final Logger LOG =
LoggerFactory.getLogger(TransitPeerSyncReplicationStateProcedure.class);
- private SyncReplicationState state;
+ private SyncReplicationState fromState;
+
+ private SyncReplicationState toState;
public TransitPeerSyncReplicationStateProcedure() {
}
public TransitPeerSyncReplicationStateProcedure(String peerId, SyncReplicationState state) {
super(peerId);
- this.state = state;
+ this.toState = state;
}
@Override
@@ -60,99 +64,148 @@ public class TransitPeerSyncReplicationStateProcedure extends ModifyPeerProcedur
}
@Override
- protected void prePeerModification(MasterProcedureEnv env)
- throws IOException, ReplicationException {
- MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
- if (cpHost != null) {
- cpHost.preTransitReplicationPeerSyncReplicationState(peerId, state);
+ protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
+ super.serializeStateData(serializer);
+ TransitPeerSyncReplicationStateStateData.Builder builder =
+ TransitPeerSyncReplicationStateStateData.newBuilder()
+ .setToState(ReplicationPeerConfigUtil.toSyncReplicationState(toState));
+ if (fromState != null) {
+ builder.setFromState(ReplicationPeerConfigUtil.toSyncReplicationState(fromState));
}
- env.getReplicationPeerManager().preTransitPeerSyncReplicationState(peerId, state);
+ serializer.serialize(builder.build());
}
@Override
- protected void updatePeerStorage(MasterProcedureEnv env) throws ReplicationException {
- env.getReplicationPeerManager().transitPeerSyncReplicationState(peerId, state);
+ protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
+ super.deserializeStateData(serializer);
+ TransitPeerSyncReplicationStateStateData data =
+ serializer.deserialize(TransitPeerSyncReplicationStateStateData.class);
+ toState = ReplicationPeerConfigUtil.toSyncReplicationState(data.getToState());
+ if (data.hasFromState()) {
+ fromState = ReplicationPeerConfigUtil.toSyncReplicationState(data.getFromState());
+ }
}
@Override
- protected void postPeerModification(MasterProcedureEnv env)
- throws IOException, ReplicationException {
- LOG.info("Successfully transit current cluster state to {} in synchronous replication peer {}",
- state, peerId);
+ protected PeerSyncReplicationStateTransitionState getState(int stateId) {
+ return PeerSyncReplicationStateTransitionState.forNumber(stateId);
+ }
+
+ @Override
+ protected int getStateId(PeerSyncReplicationStateTransitionState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected PeerSyncReplicationStateTransitionState getInitialState() {
+ return PeerSyncReplicationStateTransitionState.PRE_PEER_SYNC_REPLICATION_STATE_TRANSITION;
+ }
+
+ private void preTransit(MasterProcedureEnv env) throws IOException {
MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
- env.getMasterCoprocessorHost().postTransitReplicationPeerSyncReplicationState(peerId, state);
+ cpHost.preTransitReplicationPeerSyncReplicationState(peerId, toState);
}
+ fromState = env.getReplicationPeerManager().preTransitPeerSyncReplicationState(peerId, toState);
}
- @Override
- protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
- super.serializeStateData(serializer);
- serializer.serialize(TransitPeerSyncReplicationStateStateData.newBuilder()
- .setSyncReplicationState(ReplicationPeerConfigUtil.toSyncReplicationState(state)).build());
+ private void postTransit(MasterProcedureEnv env) throws IOException {
+ LOG.info(
+ "Successfully transit current cluster state from {} to {} for sync replication peer {}",
+ fromState, toState, peerId);
+ MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ env.getMasterCoprocessorHost().postTransitReplicationPeerSyncReplicationState(peerId,
+ fromState, toState);
+ }
}
- @Override
- protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
- super.deserializeStateData(serializer);
- TransitPeerSyncReplicationStateStateData data =
- serializer.deserialize(TransitPeerSyncReplicationStateStateData.class);
- state = ReplicationPeerConfigUtil.toSyncReplicationState(data.getSyncReplicationState());
+ private List<RegionInfo> getRegionsToReopen(MasterProcedureEnv env) {
+ return env.getReplicationPeerManager().getPeerConfig(peerId).get().getTableCFsMap().keySet()
+ .stream()
+ .flatMap(tn -> env.getAssignmentManager().getRegionStates().getRegionsOfTable(tn).stream())
+ .collect(Collectors.toList());
}
@Override
- protected Flow executeFromState(MasterProcedureEnv env, PeerModificationState state)
+ protected Flow executeFromState(MasterProcedureEnv env,
+ PeerSyncReplicationStateTransitionState state)
throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
switch (state) {
- case PRE_PEER_MODIFICATION:
+ case PRE_PEER_SYNC_REPLICATION_STATE_TRANSITION:
try {
- prePeerModification(env);
+ preTransit(env);
} catch (IOException e) {
- LOG.warn("{} failed to call pre CP hook or the pre check is failed for peer {}, " +
- "mark the procedure as failure and give up", getClass().getName(), peerId, e);
- setFailure("master-" + getPeerOperationType().name().toLowerCase() + "-peer", e);
- releaseLatch();
+ LOG.warn("Failed to call pre CP hook or the pre check is failed for peer {} " +
+ "when transiting sync replication peer state to {}, " +
+ "mark the procedure as failure and give up", peerId, toState, e);
+ setFailure("master-transit-peer-sync-replication-state", e);
return Flow.NO_MORE_STATE;
- } catch (ReplicationException e) {
- LOG.warn("{} failed to call prePeerModification for peer {}, retry", getClass().getName(),
- peerId, e);
- throw new ProcedureYieldException();
}
- setNextState(PeerModificationState.UPDATE_PEER_STORAGE);
+ setNextState(PeerSyncReplicationStateTransitionState.SET_PEER_NEW_SYNC_REPLICATION_STATE);
return Flow.HAS_MORE_STATE;
- case UPDATE_PEER_STORAGE:
+ case SET_PEER_NEW_SYNC_REPLICATION_STATE:
try {
- updatePeerStorage(env);
+ env.getReplicationPeerManager().setPeerNewSyncReplicationState(peerId, toState);
} catch (ReplicationException e) {
- LOG.warn("{} update peer storage for peer {} failed, retry", getClass().getName(), peerId,
- e);
+ LOG.warn("Failed to update peer storage for peer {} when starting transiting sync " +
+ "replication peer state from {} to {}, retry", peerId, fromState, toState, e);
throw new ProcedureYieldException();
}
- setNextState(PeerModificationState.REFRESH_PEER_ON_RS);
+ setNextState(
+ PeerSyncReplicationStateTransitionState.REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN);
+ return Flow.HAS_MORE_STATE;
+ case REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN:
+ addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
+ .map(sn -> new RefreshPeerProcedure(peerId, getPeerOperationType(), sn, 0))
+ .toArray(RefreshPeerProcedure[]::new));
+ if (fromState == SyncReplicationState.STANDBY &&
+ toState == SyncReplicationState.DOWNGRADE_ACTIVE) {
+ setNextState(PeerSyncReplicationStateTransitionState.REPLAY_REMOTE_WAL_IN_PEER);
+ } else {
+ setNextState(PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER);
+ }
+ return Flow.HAS_MORE_STATE;
+ case REPLAY_REMOTE_WAL_IN_PEER:
+ // TODO: replay remote wal when transiting from S to DA.
+ setNextState(PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER);
return Flow.HAS_MORE_STATE;
- case REFRESH_PEER_ON_RS:
- // TODO: Need add child procedure for every RegionServer
- setNextState(PeerModificationState.POST_PEER_MODIFICATION);
+ case REOPEN_ALL_REGIONS_IN_PEER:
+ addChildProcedure(
+ env.getAssignmentManager().createReopenProcedures(getRegionsToReopen(env)));
+ setNextState(
+ PeerSyncReplicationStateTransitionState.TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE);
return Flow.HAS_MORE_STATE;
- case POST_PEER_MODIFICATION:
+ case TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE:
try {
- postPeerModification(env);
+ env.getReplicationPeerManager().transitPeerSyncReplicationState(peerId, toState);
} catch (ReplicationException e) {
- LOG.warn("{} failed to call postPeerModification for peer {}, retry",
- getClass().getName(), peerId, e);
+ LOG.warn("Failed to update peer storage for peer {} when ending transiting sync " +
+ "replication peer state from {} to {}, retry", peerId, fromState, toState, e);
throw new ProcedureYieldException();
+ }
+ setNextState(
+ PeerSyncReplicationStateTransitionState.REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END);
+ return Flow.HAS_MORE_STATE;
+ case REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END:
+ addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
+ .map(sn -> new RefreshPeerProcedure(peerId, getPeerOperationType(), sn, 1))
+ .toArray(RefreshPeerProcedure[]::new));
+ setNextState(
+ PeerSyncReplicationStateTransitionState.POST_PEER_SYNC_REPLICATION_STATE_TRANSITION);
+ case POST_PEER_SYNC_REPLICATION_STATE_TRANSITION:
+ try {
+ postTransit(env);
} catch (IOException e) {
- LOG.warn("{} failed to call post CP hook for peer {}, " +
- "ignore since the procedure has already done", getClass().getName(), peerId, e);
+ LOG.warn(
+ "Failed to call post CP hook for peer {} when transiting sync replication " +
+ "peer state from {} to {}, ignore since the procedure has already done",
+ peerId, fromState, toState, e);
}
- releaseLatch();
return Flow.NO_MORE_STATE;
default:
throw new UnsupportedOperationException("unhandled state=" + state);
}
}
- private void releaseLatch() {
- ProcedurePrepareLatch.releaseLatch(latch, this);
- }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 0d59b12..61b7412 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1785,21 +1785,27 @@ public class HRegionServer extends HasThread implements
* be hooked up to WAL.
*/
private void setupWALAndReplication() throws IOException {
+ boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster &&
+ (!LoadBalancer.isTablesOnMaster(conf) || LoadBalancer.isSystemTablesOnlyOnMaster(conf));
+ if (isMasterNoTableOrSystemTableOnly) {
+ conf.setBoolean(HConstants.SYNC_REPLICATION_ENABLED, false);
+ }
WALFactory factory = new WALFactory(conf, serverName.toString());
+ if (!isMasterNoTableOrSystemTableOnly) {
+ // TODO Replication make assumptions here based on the default filesystem impl
+ Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+ String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString());
- // TODO Replication make assumptions here based on the default filesystem impl
- Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
- String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString());
-
- Path logDir = new Path(walRootDir, logName);
- LOG.debug("logDir={}", logDir);
- if (this.walFs.exists(logDir)) {
- throw new RegionServerRunningException(
- "Region server has already created directory at " + this.serverName.toString());
+ Path logDir = new Path(walRootDir, logName);
+ LOG.debug("logDir={}", logDir);
+ if (this.walFs.exists(logDir)) {
+ throw new RegionServerRunningException(
+ "Region server has already created directory at " + this.serverName.toString());
+ }
+ // Instantiate replication if replication enabled. Pass it the log directories.
+ createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir,
+ factory.getWALProvider());
}
- // Instantiate replication if replication enabled. Pass it the log directories.
- createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir,
- factory.getWALProvider());
this.walFactory = factory;
}
@@ -2897,11 +2903,6 @@ public class HRegionServer extends HasThread implements
*/
private static void createNewReplicationInstance(Configuration conf, HRegionServer server,
FileSystem walFs, Path walDir, Path oldWALDir, WALProvider walProvider) throws IOException {
- if ((server instanceof HMaster) &&
- (!LoadBalancer.isTablesOnMaster(conf) || LoadBalancer.isSystemTablesOnlyOnMaster(conf))) {
- return;
- }
-
// read in the name of the source replication class from the config file.
String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME,
HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT);
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java
index 23ba773..4529943 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java
@@ -18,17 +18,22 @@
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.replication.regionserver.PeerProcedureHandler;
+import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerInfoProvider;
import org.apache.yetus.audience.InterfaceAudience;
/**
- * A source for a replication stream has to expose this service.
- * This service allows an application to hook into the
- * regionserver and watch for new transactions.
+ * A source for a replication stream has to expose this service. This service allows an application
+ * to hook into the regionserver and watch for new transactions.
*/
@InterfaceAudience.Private
public interface ReplicationSourceService extends ReplicationService {
/**
+ * Returns an info provider for sync replication peer.
+ */
+ SyncReplicationPeerInfoProvider getSyncReplicationPeerInfoProvider();
+
+ /**
* Returns a Handler to handle peer procedures.
*/
PeerProcedureHandler getPeerProcedureHandler();
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
index 6df2af9..efafd09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
@@ -28,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public interface PeerActionListener {
- default void peerRemoved(String peerId) {}
+ static final PeerActionListener DUMMY = new PeerActionListener() {};
default void peerSyncReplicationStateChange(String peerId, SyncReplicationState from,
- SyncReplicationState to) {}
+ SyncReplicationState to, int stage) {}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java
index 65da9af..52b604b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java
@@ -15,11 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hbase.replication.regionserver;
import java.io.IOException;
-
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.yetus.audience.InterfaceAudience;
@@ -29,13 +28,16 @@ import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public interface PeerProcedureHandler {
- public void addPeer(String peerId) throws ReplicationException, IOException;
+ void addPeer(String peerId) throws ReplicationException, IOException;
+
+ void removePeer(String peerId) throws ReplicationException, IOException;
- public void removePeer(String peerId) throws ReplicationException, IOException;
+ void disablePeer(String peerId) throws ReplicationException, IOException;
- public void disablePeer(String peerId) throws ReplicationException, IOException;
+ void enablePeer(String peerId) throws ReplicationException, IOException;
- public void enablePeer(String peerId) throws ReplicationException, IOException;
+ void updatePeerConfig(String peerId) throws ReplicationException, IOException;
- public void updatePeerConfig(String peerId) throws ReplicationException, IOException;
+ void transitSyncReplicationPeerState(String peerId, int stage, HRegionServer rs)
+ throws ReplicationException, IOException;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
index a02d181..548ea41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
@@ -19,22 +19,32 @@ package org.apache.hadoop.hbase.replication.regionserver;
import java.io.IOException;
import java.util.concurrent.locks.Lock;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
public class PeerProcedureHandlerImpl implements PeerProcedureHandler {
+ private static final Logger LOG = LoggerFactory.getLogger(PeerProcedureHandlerImpl.class);
+
private final ReplicationSourceManager replicationSourceManager;
+ private final PeerActionListener peerActionListener;
private final KeyLocker<String> peersLock = new KeyLocker<>();
- public PeerProcedureHandlerImpl(ReplicationSourceManager replicationSourceManager) {
+ public PeerProcedureHandlerImpl(ReplicationSourceManager replicationSourceManager,
+ PeerActionListener peerActionListener) {
this.replicationSourceManager = replicationSourceManager;
+ this.peerActionListener = peerActionListener;
}
@Override
@@ -60,7 +70,6 @@ public class PeerProcedureHandlerImpl implements PeerProcedureHandler {
}
private void refreshPeerState(String peerId) throws ReplicationException, IOException {
- PeerState newState;
Lock peerLock = peersLock.acquireLock(peerId);
ReplicationPeerImpl peer = null;
PeerState oldState = null;
@@ -71,7 +80,7 @@ public class PeerProcedureHandlerImpl implements PeerProcedureHandler {
throw new ReplicationException("Peer with id=" + peerId + " is not cached.");
}
oldState = peer.getPeerState();
- newState = replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+ PeerState newState = replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
// RS need to start work with the new replication state change
if (oldState.equals(PeerState.ENABLED) && newState.equals(PeerState.DISABLED)) {
replicationSourceManager.refreshSources(peerId);
@@ -109,7 +118,7 @@ public class PeerProcedureHandlerImpl implements PeerProcedureHandler {
}
oldConfig = peer.getPeerConfig();
ReplicationPeerConfig newConfig =
- replicationSourceManager.getReplicationPeers().refreshPeerConfig(peerId);
+ replicationSourceManager.getReplicationPeers().refreshPeerConfig(peerId);
// RS need to start work with the new replication config change
if (!ReplicationUtils.isKeyConfigEqual(oldConfig, newConfig)) {
replicationSourceManager.refreshSources(peerId);
@@ -123,4 +132,42 @@ public class PeerProcedureHandlerImpl implements PeerProcedureHandler {
peerLock.unlock();
}
}
+
+ @Override
+ public void transitSyncReplicationPeerState(String peerId, int stage, HRegionServer rs)
+ throws ReplicationException, IOException {
+ ReplicationPeers replicationPeers = replicationSourceManager.getReplicationPeers();
+ Lock peerLock = peersLock.acquireLock(peerId);
+ try {
+ ReplicationPeerImpl peer = replicationPeers.getPeer(peerId);
+ if (peer == null) {
+ throw new ReplicationException("Peer with id=" + peerId + " is not cached.");
+ }
+ if (!peer.getPeerConfig().isSyncReplication()) {
+ throw new ReplicationException("Peer with id=" + peerId + " is not synchronous.");
+ }
+ SyncReplicationState newState = peer.getNewSyncReplicationState();
+ if (stage == 0) {
+ if (newState != SyncReplicationState.NONE) {
+ LOG.warn("The new sync replication state for peer {} has already been set to {}, " +
+ "this should be a retry, give up", peerId, newState);
+ return;
+ }
+ newState = replicationPeers.refreshPeerNewSyncReplicationState(peerId);
+ SyncReplicationState oldState = peer.getSyncReplicationState();
+ peerActionListener.peerSyncReplicationStateChange(peerId, oldState, newState, stage);
+ } else {
+ if (newState == SyncReplicationState.NONE) {
+ LOG.warn("The new sync replication state for peer {} has already been clear, and the " +
+ "current state is {}, this should be a retry, give up", peerId, newState);
+ return;
+ }
+ SyncReplicationState oldState = peer.getSyncReplicationState();
+ peerActionListener.peerSyncReplicationStateChange(peerId, oldState, newState, stage);
+ peer.transitSyncReplicationState();
+ }
+ } finally {
+ peerLock.unlock();
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java
index 7ada24b..8fe16bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java
@@ -35,12 +35,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.R
public class RefreshPeerCallable implements RSProcedureCallable {
private static final Logger LOG = Logger.getLogger(RefreshPeerCallable.class);
+
private HRegionServer rs;
private String peerId;
private PeerModificationType type;
+ private int stage;
+
private Exception initError;
@Override
@@ -67,6 +70,9 @@ public class RefreshPeerCallable implements RSProcedureCallable {
case UPDATE_PEER_CONFIG:
handler.updatePeerConfig(this.peerId);
break;
+ case TRANSIT_SYNC_REPLICATION_STATE:
+ handler.transitSyncReplicationPeerState(peerId, stage, rs);
+ break;
default:
throw new IllegalArgumentException("Unknown peer modification type: " + type);
}
@@ -80,6 +86,7 @@ public class RefreshPeerCallable implements RSProcedureCallable {
RefreshPeerParameter param = RefreshPeerParameter.parseFrom(parameter);
this.peerId = param.getPeerId();
this.type = param.getType();
+ this.stage = param.getStage();
} catch (InvalidProtocolBufferException e) {
initError = e;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/03057e19/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index 7803ac4..f355021 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
import org.apache.hadoop.hbase.replication.ReplicationTracker;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider;
import org.apache.hadoop.hbase.wal.WALProvider;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.yetus.audience.InterfaceAudience;
@@ -66,6 +67,7 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer
private ReplicationTracker replicationTracker;
private Configuration conf;
private ReplicationSink replicationSink;
+ private SyncReplicationPeerInfoProvider syncReplicationPeerInfoProvider;
// Hosting server
private Server server;
/** Statistics thread schedule pool */
@@ -120,19 +122,30 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer
} catch (KeeperException ke) {
throw new IOException("Could not read cluster id", ke);
}
+ SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager();
this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers,
replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId,
- walProvider != null ? walProvider.getWALFileLengthProvider() : p -> OptionalLong.empty());
+ walProvider != null ? walProvider.getWALFileLengthProvider() : p -> OptionalLong.empty(),
+ mapping);
+ this.syncReplicationPeerInfoProvider =
+ new SyncReplicationPeerInfoProviderImpl(replicationPeers, mapping);
+ PeerActionListener peerActionListener = PeerActionListener.DUMMY;
if (walProvider != null) {
walProvider
.addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager));
+ if (walProvider instanceof SyncReplicationWALProvider) {
+ SyncReplicationWALProvider syncWALProvider = (SyncReplicationWALProvider) walProvider;
+ peerActionListener = syncWALProvider;
+ syncWALProvider.setPeerInfoProvider(syncReplicationPeerInfoProvider);
+ }
}
this.statsThreadPeriod =
this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);
LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod);
this.replicationLoad = new ReplicationLoad();
- this.peerProcedureHandler = new PeerProcedureHandlerImpl(replicationManager);
+ this.peerProcedureHandler =
+ new PeerProcedureHandlerImpl(replicationManager, peerActionListener);
}
@Override
@@ -270,4 +283,9 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer
MetricsSink sinkMetrics = this.replicationSink.getSinkMetrics();
this.replicationLoad.buildReplicationLoad(sourceMetricsList, sinkMetrics);
}
+
+ @Override
+ public SyncReplicationPeerInfoProvider getSyncReplicationPeerInfoProvider() {
+ return syncReplicationPeerInfoProvider;
+ }
}
[11/11] hbase git commit: HBASE-19857 Complete the procedure for
adding a sync replication peer
Posted by zh...@apache.org.
HBASE-19857 Complete the procedure for adding a sync replication peer
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0a2661a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0a2661a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0a2661a9
Branch: refs/heads/HBASE-19064
Commit: 0a2661a94e2f9469f9c2367e772b009bc9175639
Parents: 6fea1c9
Author: zhangduo <zh...@apache.org>
Authored: Thu Jan 25 20:09:00 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Feb 10 17:49:13 2018 +0800
----------------------------------------------------------------------
.../hbase/replication/ReplicationPeer.java | 9 +
.../hbase/replication/ReplicationPeerImpl.java | 28 +--
.../hbase/replication/ReplicationPeers.java | 3 +-
.../regionserver/PeerActionListener.java | 10 +-
.../SyncReplicationPeerProvider.java | 35 +++
.../SynchronousReplicationPeerProvider.java | 35 ---
.../hbase/wal/SyncReplicationWALProvider.java | 234 +++++++++++++++++++
.../wal/SynchronousReplicationWALProvider.java | 225 ------------------
.../org/apache/hadoop/hbase/wal/WALFactory.java | 8 +-
.../TestReplicationSourceManager.java | 3 +
.../wal/TestSyncReplicationWALProvider.java | 153 ++++++++++++
.../TestSynchronousReplicationWALProvider.java | 153 ------------
12 files changed, 456 insertions(+), 440 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 2da3cce..0196a9a 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,15 @@ public interface ReplicationPeer {
PeerState getPeerState();
/**
+ * Returns the sync replication state of the peer by reading local cache.
+ * <p>
+ * If the peer is not a synchronous replication peer, a {@link SyncReplicationState#NONE} will be
+ * returned.
+ * @return the sync replication state
+ */
+ SyncReplicationState getSyncReplicationState();
+
+ /**
* Test whether the peer is enabled.
* @return {@code true} if enabled, otherwise {@code false}.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index d656466..ff3f662 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -36,6 +36,8 @@ public class ReplicationPeerImpl implements ReplicationPeer {
private volatile PeerState peerState;
+ private volatile SyncReplicationState syncReplicationState;
+
private final List<ReplicationPeerConfigListener> peerConfigListeners;
/**
@@ -45,12 +47,13 @@ public class ReplicationPeerImpl implements ReplicationPeer {
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer
*/
- public ReplicationPeerImpl(Configuration conf, String id, boolean peerState,
- ReplicationPeerConfig peerConfig) {
+ public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig peerConfig,
+ boolean peerState, SyncReplicationState syncReplicationState) {
this.conf = conf;
this.id = id;
this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED;
this.peerConfig = peerConfig;
+ this.syncReplicationState = syncReplicationState;
this.peerConfigListeners = new ArrayList<>();
}
@@ -77,37 +80,26 @@ public class ReplicationPeerImpl implements ReplicationPeer {
return peerState;
}
- /**
- * Get the peer config object
- * @return the ReplicationPeerConfig for this peer
- */
+ @Override
+ public SyncReplicationState getSyncReplicationState() {
+ return syncReplicationState;
+ }
+
@Override
public ReplicationPeerConfig getPeerConfig() {
return peerConfig;
}
- /**
- * Get the configuration object required to communicate with this peer
- * @return configuration object
- */
@Override
public Configuration getConfiguration() {
return conf;
}
- /**
- * Get replicable (table, cf-list) map of this peer
- * @return the replicable (table, cf-list) map
- */
@Override
public Map<TableName, List<String>> getTableCFs() {
return this.peerConfig.getTableCFsMap();
}
- /**
- * Get replicable namespace set of this peer
- * @return the replicable namespaces set
- */
@Override
public Set<String> getNamespaces() {
return this.peerConfig.getNamespaces();
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index eacb2f4..f120dbc 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -129,7 +129,8 @@ public class ReplicationPeers {
private ReplicationPeerImpl createPeer(String peerId) throws ReplicationException {
ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId);
boolean enabled = peerStorage.isPeerEnabled(peerId);
+ SyncReplicationState syncReplicationState = peerStorage.getPeerSyncReplicationState(peerId);
return new ReplicationPeerImpl(ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf),
- peerId, enabled, peerConfig);
+ peerId, peerConfig, enabled, syncReplicationState);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
index 74ad626..6df2af9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java
@@ -17,17 +17,19 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Get notification for replication peer events. Mainly used for telling the
- * {@link org.apache.hadoop.hbase.wal.SynchronousReplicationWALProvider} to close some WAL if not
- * used any more.
- * <p>
- * TODO: Also need a synchronous peer state change notification.
+ * {@link org.apache.hadoop.hbase.wal.SyncReplicationWALProvider} to close some WAL if not used any
+ * more.
*/
@InterfaceAudience.Private
public interface PeerActionListener {
default void peerRemoved(String peerId) {}
+
+ default void peerSyncReplicationStateChange(String peerId, SyncReplicationState from,
+ SyncReplicationState to) {}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerProvider.java
new file mode 100644
index 0000000..b97bf7e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Get the peer id and remote root dir if the region is synchronously replicated.
+ */
+@InterfaceAudience.Private
+public interface SyncReplicationPeerProvider {
+
+ /**
+ * Return the peer id and remote WAL directory if the region is synchronously replicated.
+ */
+ Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info);
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SynchronousReplicationPeerProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SynchronousReplicationPeerProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SynchronousReplicationPeerProvider.java
deleted file mode 100644
index b4e04fb..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SynchronousReplicationPeerProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication.regionserver;
-
-import java.util.Optional;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Get the peer id and remote root dir if the region is synchronously replicated.
- */
-@InterfaceAudience.Private
-public interface SynchronousReplicationPeerProvider {
-
- /**
- * Return the peer id and remote WAL directory if the region is synchronously replicated.
- */
- Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info);
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
new file mode 100644
index 0000000..bccc842
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.wal;
+
+import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.WAL_FILE_NAME_DELIMITER;
+import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.getWALArchiveDirectoryName;
+import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.getWALDirectoryName;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Lock;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL;
+import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.replication.regionserver.PeerActionListener;
+import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerProvider;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.KeyLocker;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Streams;
+import org.apache.hbase.thirdparty.io.netty.channel.Channel;
+import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
+
+/**
+ * The special {@link WALProvider} for synchronous replication.
+ * <p>
+ * It works like an interceptor, when getting WAL, first it will check if the given region should be
+ * replicated synchronously, if so it will return a special WAL for it, otherwise it will delegate
+ * the request to the normal {@link WALProvider}.
+ */
+@InterfaceAudience.Private
+public class SyncReplicationWALProvider implements WALProvider, PeerActionListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(SyncReplicationWALProvider.class);
+
+ private static final String LOG_SUFFIX = ".syncrep";
+
+ private final WALProvider provider;
+
+ private final SyncReplicationPeerProvider peerProvider;
+
+ private WALFactory factory;
+
+ private Configuration conf;
+
+ private List<WALActionsListener> listeners = new ArrayList<>();
+
+ private EventLoopGroup eventLoopGroup;
+
+ private Class<? extends Channel> channelClass;
+
+ private AtomicBoolean initialized = new AtomicBoolean(false);
+
+ private final ConcurrentMap<String, DualAsyncFSWAL> peerId2WAL = new ConcurrentHashMap<>();
+
+ private final KeyLocker<String> createLock = new KeyLocker<>();
+
+ SyncReplicationWALProvider(WALProvider provider, SyncReplicationPeerProvider peerProvider) {
+ this.provider = provider;
+ this.peerProvider = peerProvider;
+ }
+
+ @Override
+ public void init(WALFactory factory, Configuration conf, String providerId) throws IOException {
+ if (!initialized.compareAndSet(false, true)) {
+ throw new IllegalStateException("WALProvider.init should only be called once.");
+ }
+ provider.init(factory, conf, providerId);
+ this.conf = conf;
+ this.factory = factory;
+ Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass =
+ NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
+ eventLoopGroup = eventLoopGroupAndChannelClass.getFirst();
+ channelClass = eventLoopGroupAndChannelClass.getSecond();
+ }
+
+ private String getLogPrefix(String peerId) {
+ return factory.factoryId + WAL_FILE_NAME_DELIMITER + peerId;
+ }
+
+ private DualAsyncFSWAL createWAL(String peerId, String remoteWALDir) throws IOException {
+ Path remoteWALDirPath = new Path(remoteWALDir);
+ FileSystem remoteFs = remoteWALDirPath.getFileSystem(conf);
+ return new DualAsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), remoteFs,
+ CommonFSUtils.getWALRootDir(conf), new Path(remoteWALDirPath, peerId),
+ getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId),
+ conf, listeners, true, getLogPrefix(peerId), LOG_SUFFIX, eventLoopGroup, channelClass);
+ }
+
+ private DualAsyncFSWAL getWAL(String peerId, String remoteWALDir) throws IOException {
+ DualAsyncFSWAL wal = peerId2WAL.get(peerId);
+ if (wal != null) {
+ return wal;
+ }
+ Lock lock = createLock.acquireLock(peerId);
+ try {
+ wal = peerId2WAL.get(peerId);
+ if (wal == null) {
+ wal = createWAL(peerId, remoteWALDir);
+ peerId2WAL.put(peerId, wal);
+ wal.init();
+ }
+ return wal;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public WAL getWAL(RegionInfo region) throws IOException {
+ Optional<Pair<String, String>> peerIdAndRemoteWALDir =
+ peerProvider.getPeerIdAndRemoteWALDir(region);
+ if (peerIdAndRemoteWALDir.isPresent()) {
+ Pair<String, String> pair = peerIdAndRemoteWALDir.get();
+ return getWAL(pair.getFirst(), pair.getSecond());
+ } else {
+ return provider.getWAL(region);
+ }
+ }
+
+ private Stream<WAL> getWALStream() {
+ return Streams.concat(peerId2WAL.values().stream(), provider.getWALs().stream());
+ }
+
+ @Override
+ public List<WAL> getWALs() {
+ return getWALStream().collect(Collectors.toList());
+ }
+
+ @Override
+ public void shutdown() throws IOException {
+ // save the last exception and rethrow
+ IOException failure = null;
+ for (DualAsyncFSWAL wal : peerId2WAL.values()) {
+ try {
+ wal.shutdown();
+ } catch (IOException e) {
+ LOG.error("Shutdown WAL failed", e);
+ failure = e;
+ }
+ }
+ provider.shutdown();
+ if (failure != null) {
+ throw failure;
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ // save the last exception and rethrow
+ IOException failure = null;
+ for (DualAsyncFSWAL wal : peerId2WAL.values()) {
+ try {
+ wal.close();
+ } catch (IOException e) {
+ LOG.error("Close WAL failed", e);
+ failure = e;
+ }
+ }
+ provider.close();
+ if (failure != null) {
+ throw failure;
+ }
+ }
+
+ @Override
+ public long getNumLogFiles() {
+ return peerId2WAL.size() + provider.getNumLogFiles();
+ }
+
+ @Override
+ public long getLogFileSize() {
+ return peerId2WAL.values().stream().mapToLong(DualAsyncFSWAL::getLogFileSize).sum() +
+ provider.getLogFileSize();
+ }
+
+ private void safeClose(WAL wal) {
+ if (wal != null) {
+ try {
+ wal.close();
+ } catch (IOException e) {
+ LOG.error("Close WAL failed", e);
+ }
+ }
+ }
+
+ @Override
+ public void addWALActionsListener(WALActionsListener listener) {
+ listeners.add(listener);
+ provider.addWALActionsListener(listener);
+ }
+
+ @Override
+ public void peerRemoved(String peerId) {
+ safeClose(peerId2WAL.remove(peerId));
+ }
+
+ @Override
+ public void peerSyncReplicationStateChange(String peerId, SyncReplicationState from,
+ SyncReplicationState to) {
+ assert to == SyncReplicationState.DOWNGRADE_ACTIVE;
+ safeClose(peerId2WAL.remove(peerId));
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SynchronousReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SynchronousReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SynchronousReplicationWALProvider.java
deleted file mode 100644
index f60599f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SynchronousReplicationWALProvider.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.wal;
-
-import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.WAL_FILE_NAME_DELIMITER;
-import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.getWALArchiveDirectoryName;
-import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.getWALDirectoryName;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.Lock;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL;
-import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-import org.apache.hadoop.hbase.replication.regionserver.PeerActionListener;
-import org.apache.hadoop.hbase.replication.regionserver.SynchronousReplicationPeerProvider;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.KeyLocker;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.Streams;
-import org.apache.hbase.thirdparty.io.netty.channel.Channel;
-import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
-
-/**
- * The special {@link WALProvider} for synchronous replication.
- * <p>
- * It works like an interceptor, when getting WAL, first it will check if the given region should be
- * replicated synchronously, if so it will return a special WAL for it, otherwise it will delegate
- * the request to the normal {@link WALProvider}.
- */
-@InterfaceAudience.Private
-public class SynchronousReplicationWALProvider implements WALProvider, PeerActionListener {
-
- private static final Logger LOG =
- LoggerFactory.getLogger(SynchronousReplicationWALProvider.class);
-
- private static final String LOG_SUFFIX = ".syncrep";
-
- private final WALProvider provider;
-
- private final SynchronousReplicationPeerProvider peerProvider;
-
- private WALFactory factory;
-
- private Configuration conf;
-
- private List<WALActionsListener> listeners = new ArrayList<>();
-
- private EventLoopGroup eventLoopGroup;
-
- private Class<? extends Channel> channelClass;
-
- private AtomicBoolean initialized = new AtomicBoolean(false);
-
- private final ConcurrentMap<String, DualAsyncFSWAL> peerId2WAL = new ConcurrentHashMap<>();
-
- private final KeyLocker<String> createLock = new KeyLocker<>();
-
- SynchronousReplicationWALProvider(WALProvider provider,
- SynchronousReplicationPeerProvider peerProvider) {
- this.provider = provider;
- this.peerProvider = peerProvider;
- }
-
- @Override
- public void init(WALFactory factory, Configuration conf, String providerId) throws IOException {
- if (!initialized.compareAndSet(false, true)) {
- throw new IllegalStateException("WALProvider.init should only be called once.");
- }
- provider.init(factory, conf, providerId);
- this.conf = conf;
- this.factory = factory;
- Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass =
- NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf);
- eventLoopGroup = eventLoopGroupAndChannelClass.getFirst();
- channelClass = eventLoopGroupAndChannelClass.getSecond();
- }
-
- private String getLogPrefix(String peerId) {
- return factory.factoryId + WAL_FILE_NAME_DELIMITER + peerId;
- }
-
- private DualAsyncFSWAL createWAL(String peerId, String remoteWALDir) throws IOException {
- Path remoteWALDirPath = new Path(remoteWALDir);
- FileSystem remoteFs = remoteWALDirPath.getFileSystem(conf);
- return new DualAsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), remoteFs,
- CommonFSUtils.getWALRootDir(conf), new Path(remoteWALDirPath, peerId),
- getWALDirectoryName(factory.factoryId), getWALArchiveDirectoryName(conf, factory.factoryId),
- conf, listeners, true, getLogPrefix(peerId), LOG_SUFFIX, eventLoopGroup, channelClass);
- }
-
- private DualAsyncFSWAL getWAL(String peerId, String remoteWALDir) throws IOException {
- DualAsyncFSWAL wal = peerId2WAL.get(peerId);
- if (wal != null) {
- return wal;
- }
- Lock lock = createLock.acquireLock(peerId);
- try {
- wal = peerId2WAL.get(peerId);
- if (wal == null) {
- wal = createWAL(peerId, remoteWALDir);
- peerId2WAL.put(peerId, wal);
- wal.init();
- }
- return wal;
- } finally {
- lock.unlock();
- }
- }
-
- @Override
- public WAL getWAL(RegionInfo region) throws IOException {
- Optional<Pair<String, String>> peerIdAndRemoteWALDir =
- peerProvider.getPeerIdAndRemoteWALDir(region);
- if (peerIdAndRemoteWALDir.isPresent()) {
- Pair<String, String> pair = peerIdAndRemoteWALDir.get();
- return getWAL(pair.getFirst(), pair.getSecond());
- } else {
- return provider.getWAL(region);
- }
- }
-
- private Stream<WAL> getWALStream() {
- return Streams.concat(peerId2WAL.values().stream(), provider.getWALs().stream());
- }
-
- @Override
- public List<WAL> getWALs() {
- return getWALStream().collect(Collectors.toList());
- }
-
- @Override
- public void shutdown() throws IOException {
- // save the last exception and rethrow
- IOException failure = null;
- for (DualAsyncFSWAL wal : peerId2WAL.values()) {
- try {
- wal.shutdown();
- } catch (IOException e) {
- LOG.error("Shutdown WAL failed", e);
- failure = e;
- }
- }
- provider.shutdown();
- if (failure != null) {
- throw failure;
- }
- }
-
- @Override
- public void close() throws IOException {
- // save the last exception and rethrow
- IOException failure = null;
- for (DualAsyncFSWAL wal : peerId2WAL.values()) {
- try {
- wal.close();
- } catch (IOException e) {
- LOG.error("Close WAL failed", e);
- failure = e;
- }
- }
- provider.close();
- if (failure != null) {
- throw failure;
- }
- }
-
- @Override
- public long getNumLogFiles() {
- return peerId2WAL.size() + provider.getNumLogFiles();
- }
-
- @Override
- public long getLogFileSize() {
- return peerId2WAL.values().stream().mapToLong(DualAsyncFSWAL::getLogFileSize).sum() +
- provider.getLogFileSize();
- }
-
- @Override
- public void peerRemoved(String peerId) {
- WAL wal = peerId2WAL.remove(peerId);
- if (wal != null) {
- try {
- wal.close();
- } catch (IOException e) {
- LOG.error("Close WAL failed", e);
- }
- }
- }
-
- @Override
- public void addWALActionsListener(WALActionsListener listener) {
- listeners.add(listener);
- provider.addWALActionsListener(listener);
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 4e519ee..06999ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
-import org.apache.hadoop.hbase.replication.regionserver.SynchronousReplicationPeerProvider;
+import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerProvider;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
@@ -186,7 +186,7 @@ public class WALFactory {
* Remove it once we can integrate the synchronous replication logic in RS.
*/
@VisibleForTesting
- WALFactory(Configuration conf, String factoryId, SynchronousReplicationPeerProvider peerProvider)
+ WALFactory(Configuration conf, String factoryId, SyncReplicationPeerProvider peerProvider)
throws IOException {
timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000);
/* TODO Both of these are probably specific to the fs wal provider */
@@ -195,9 +195,9 @@ public class WALFactory {
this.conf = conf;
this.factoryId = factoryId;
WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER));
- this.provider = new SynchronousReplicationWALProvider(provider, peerProvider);
- this.provider.addWALActionsListener(new MetricsWAL());
+ this.provider = new SyncReplicationWALProvider(provider, peerProvider);
this.provider.init(this, conf, null);
+ this.provider.addWALActionsListener(new MetricsWAL());
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index db62b49..1204c9b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -172,6 +172,9 @@ public abstract class TestReplicationSourceManager {
ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state");
ZKUtil.setData(zkw, "/hbase/replication/peers/1/peer-state",
ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
+ ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/sync-rep-state");
+ ZKUtil.setData(zkw, "/hbase/replication/peers/1/sync-rep-state",
+ Bytes.toBytes(SyncReplicationState.NONE.ordinal()));
ZKUtil.createWithParents(zkw, "/hbase/replication/state");
ZKUtil.setData(zkw, "/hbase/replication/state", ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
new file mode 100644
index 0000000..60a9e13
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.wal;
+
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+
+import java.io.IOException;
+import java.util.Optional;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
+import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL;
+import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
+import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogTestHelper;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ RegionServerTests.class, MediumTests.class })
+public class TestSyncReplicationWALProvider {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static String PEER_ID = "1";
+
+ private static String REMOTE_WAL_DIR = "/RemoteWAL";
+
+ private static TableName TABLE = TableName.valueOf("table");
+
+ private static TableName TABLE_NO_REP = TableName.valueOf("table-no-rep");
+
+ private static RegionInfo REGION = RegionInfoBuilder.newBuilder(TABLE).build();
+
+ private static RegionInfo REGION_NO_REP = RegionInfoBuilder.newBuilder(TABLE_NO_REP).build();
+
+ private static WALFactory FACTORY;
+
+ private static Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info) {
+ if (info.getTable().equals(TABLE)) {
+ return Optional.of(Pair.newPair(PEER_ID, REMOTE_WAL_DIR));
+ } else {
+ return Optional.empty();
+ }
+ }
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ UTIL.startMiniDFSCluster(3);
+ FACTORY = new WALFactory(UTIL.getConfiguration(), "test",
+ TestSyncReplicationWALProvider::getPeerIdAndRemoteWALDir);
+ UTIL.getTestFileSystem().mkdirs(new Path(REMOTE_WAL_DIR, PEER_ID));
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws IOException {
+ FACTORY.close();
+ UTIL.shutdownMiniDFSCluster();
+ }
+
+ private void testReadWrite(DualAsyncFSWAL wal) throws Exception {
+ int recordCount = 100;
+ int columnCount = 10;
+ byte[] row = Bytes.toBytes("testRow");
+ long timestamp = System.currentTimeMillis();
+ MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
+ ProtobufLogTestHelper.doWrite(wal, REGION, TABLE, columnCount, recordCount, row, timestamp,
+ mvcc);
+ Path localFile = wal.getCurrentFileName();
+ Path remoteFile = new Path(REMOTE_WAL_DIR + "/" + PEER_ID, localFile.getName());
+ try (ProtobufLogReader reader =
+ (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) {
+ ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row,
+ timestamp);
+ }
+ try (ProtobufLogReader reader =
+ (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) {
+ ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row,
+ timestamp);
+ }
+ wal.rollWriter();
+ DistributedFileSystem dfs = (DistributedFileSystem) UTIL.getDFSCluster().getFileSystem();
+ UTIL.waitFor(5000, new ExplainingPredicate<Exception>() {
+
+ @Override
+ public boolean evaluate() throws Exception {
+ return dfs.isFileClosed(localFile) && dfs.isFileClosed(remoteFile);
+ }
+
+ @Override
+ public String explainFailure() throws Exception {
+ StringBuilder sb = new StringBuilder();
+ if (!dfs.isFileClosed(localFile)) {
+ sb.append(localFile + " has not been closed yet.");
+ }
+ if (!dfs.isFileClosed(remoteFile)) {
+ sb.append(remoteFile + " has not been closed yet.");
+ }
+ return sb.toString();
+ }
+ });
+ try (ProtobufLogReader reader =
+ (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) {
+ ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row,
+ timestamp);
+ }
+ try (ProtobufLogReader reader =
+ (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) {
+ ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row,
+ timestamp);
+ }
+ }
+
+ @Test
+ public void test() throws Exception {
+ WAL walNoRep = FACTORY.getWAL(REGION_NO_REP);
+ assertThat(walNoRep, not(instanceOf(DualAsyncFSWAL.class)));
+ DualAsyncFSWAL wal = (DualAsyncFSWAL) FACTORY.getWAL(REGION);
+ assertEquals(2, FACTORY.getWALs().size());
+ testReadWrite(wal);
+ SyncReplicationWALProvider walProvider =
+ (SyncReplicationWALProvider) FACTORY.getWALProvider();
+ walProvider.peerRemoved(PEER_ID);
+ assertEquals(1, FACTORY.getWALs().size());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2661a9/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSynchronousReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSynchronousReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSynchronousReplicationWALProvider.java
deleted file mode 100644
index e6031c6..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSynchronousReplicationWALProvider.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.wal;
-
-import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-
-import java.io.IOException;
-import java.util.Optional;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import org.apache.hadoop.hbase.regionserver.wal.DualAsyncFSWAL;
-import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
-import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogTestHelper;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ RegionServerTests.class, MediumTests.class })
-public class TestSynchronousReplicationWALProvider {
-
- private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
- private static String PEER_ID = "1";
-
- private static String REMOTE_WAL_DIR = "/RemoteWAL";
-
- private static TableName TABLE = TableName.valueOf("table");
-
- private static TableName TABLE_NO_REP = TableName.valueOf("table-no-rep");
-
- private static RegionInfo REGION = RegionInfoBuilder.newBuilder(TABLE).build();
-
- private static RegionInfo REGION_NO_REP = RegionInfoBuilder.newBuilder(TABLE_NO_REP).build();
-
- private static WALFactory FACTORY;
-
- private static Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info) {
- if (info.getTable().equals(TABLE)) {
- return Optional.of(Pair.newPair(PEER_ID, REMOTE_WAL_DIR));
- } else {
- return Optional.empty();
- }
- }
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- UTIL.startMiniDFSCluster(3);
- FACTORY = new WALFactory(UTIL.getConfiguration(), "test",
- TestSynchronousReplicationWALProvider::getPeerIdAndRemoteWALDir);
- UTIL.getTestFileSystem().mkdirs(new Path(REMOTE_WAL_DIR, PEER_ID));
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws IOException {
- FACTORY.close();
- UTIL.shutdownMiniDFSCluster();
- }
-
- private void testReadWrite(DualAsyncFSWAL wal) throws Exception {
- int recordCount = 100;
- int columnCount = 10;
- byte[] row = Bytes.toBytes("testRow");
- long timestamp = System.currentTimeMillis();
- MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
- ProtobufLogTestHelper.doWrite(wal, REGION, TABLE, columnCount, recordCount, row, timestamp,
- mvcc);
- Path localFile = wal.getCurrentFileName();
- Path remoteFile = new Path(REMOTE_WAL_DIR + "/" + PEER_ID, localFile.getName());
- try (ProtobufLogReader reader =
- (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) {
- ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row,
- timestamp);
- }
- try (ProtobufLogReader reader =
- (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) {
- ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row,
- timestamp);
- }
- wal.rollWriter();
- DistributedFileSystem dfs = (DistributedFileSystem) UTIL.getDFSCluster().getFileSystem();
- UTIL.waitFor(5000, new ExplainingPredicate<Exception>() {
-
- @Override
- public boolean evaluate() throws Exception {
- return dfs.isFileClosed(localFile) && dfs.isFileClosed(remoteFile);
- }
-
- @Override
- public String explainFailure() throws Exception {
- StringBuilder sb = new StringBuilder();
- if (!dfs.isFileClosed(localFile)) {
- sb.append(localFile + " has not been closed yet.");
- }
- if (!dfs.isFileClosed(remoteFile)) {
- sb.append(remoteFile + " has not been closed yet.");
- }
- return sb.toString();
- }
- });
- try (ProtobufLogReader reader =
- (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) {
- ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row,
- timestamp);
- }
- try (ProtobufLogReader reader =
- (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) {
- ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row,
- timestamp);
- }
- }
-
- @Test
- public void test() throws Exception {
- WAL walNoRep = FACTORY.getWAL(REGION_NO_REP);
- assertThat(walNoRep, not(instanceOf(DualAsyncFSWAL.class)));
- DualAsyncFSWAL wal = (DualAsyncFSWAL) FACTORY.getWAL(REGION);
- assertEquals(2, FACTORY.getWALs().size());
- testReadWrite(wal);
- SynchronousReplicationWALProvider walProvider =
- (SynchronousReplicationWALProvider) FACTORY.getWALProvider();
- walProvider.peerRemoved(PEER_ID);
- assertEquals(1, FACTORY.getWALs().size());
- }
-}
[09/11] hbase git commit: HBASE-19781 Add a new cluster state flag
for synchronous replication
Posted by zh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 8911982..f5eca39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
@@ -67,9 +68,9 @@ public class TestHBaseFsckReplication {
String peerId1 = "1";
String peerId2 = "2";
peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
- true);
+ true, SyncReplicationState.NONE);
peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
- true);
+ true, SyncReplicationState.NONE);
for (int i = 0; i < 10; i++) {
queueStorage.addWAL(ServerName.valueOf("localhost", 10000 + i, 100000 + i), peerId1,
"file-" + i);
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-shell/src/main/ruby/hbase/replication_admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index ba7d191..d5d4844 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -20,6 +20,7 @@
include Java
java_import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil
+java_import org.apache.hadoop.hbase.replication.SyncReplicationState
java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig
java_import org.apache.hadoop.hbase.util.Bytes
java_import org.apache.hadoop.hbase.zookeeper.ZKConfig
@@ -329,6 +330,20 @@ module Hbase
'!' + ReplicationPeerConfigUtil.convertToString(tableCFs)
end
+ # Transit current cluster to a new state in the specified synchronous
+ # replication peer
+ def transit_peer_sync_replication_state(id, state)
+ if 'ACTIVE'.eql?(state)
+ @admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::ACTIVE)
+ elsif 'DOWNGRADE_ACTIVE'.eql?(state)
+ @admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::DOWNGRADE_ACTIVE)
+ elsif 'STANDBY'.eql?(state)
+ @admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::STANDBY)
+ else
+ raise(ArgumentError, 'synchronous replication state must be ACTIVE, DOWNGRADE_ACTIVE or STANDBY')
+ end
+ end
+
#----------------------------------------------------------------------------------------------
# Enables a table's replication switch
def enable_tablerep(table_name)
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 507c0a9..0ed71ae 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -395,6 +395,7 @@ Shell.load_command_group(
get_peer_config
list_peer_configs
update_peer_config
+ transit_peer_sync_replication_state
]
)
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index caeab86..aa10fda 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -39,8 +39,8 @@ EOF
peers = replication_admin.list_peers
formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
- REMOTE_ROOT_DIR STATE REPLICATE_ALL
- NAMESPACES TABLE_CFS BANDWIDTH])
+ REMOTE_ROOT_DIR SYNC_REPLICATION_STATE STATE
+ REPLICATE_ALL NAMESPACES TABLE_CFS BANDWIDTH])
peers.each do |peer|
id = peer.getPeerId
@@ -66,7 +66,7 @@ EOF
remote_root_dir = config.getRemoteWALDir
end
formatter.row([id, cluster_key, endpoint_classname,
- remote_root_dir, state,
+ remote_root_dir, peer.getSyncReplicationState, state,
config.replicateAllUserTables, namespaces, tableCFs,
config.getBandwidth])
end
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-shell/src/main/ruby/shell/commands/transit_peer_sync_replication_state.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/transit_peer_sync_replication_state.rb b/hbase-shell/src/main/ruby/shell/commands/transit_peer_sync_replication_state.rb
new file mode 100644
index 0000000..6d4a963
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/transit_peer_sync_replication_state.rb
@@ -0,0 +1,44 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+ module Commands
+ class TransitPeerSyncReplicationState < Command
+ def help
+ <<-EOF
+Transit current cluster to new state in the specified synchronous replication peer.
+Examples:
+
+ # Transit cluster state to DOWNGRADE_ACTIVE in a synchronous replication peer
+ hbase> transit_peer_sync_replication_state '1', 'DOWNGRADE_ACTIVE'
+ # Transit cluster state to ACTIVE in a synchronous replication peer
+ hbase> transit_peer_sync_replication_state '1', 'ACTIVE'
+ # Transit cluster state to STANDBY in a synchronous replication peer
+ hbase> transit_peer_sync_replication_state '1', 'STANDBY'
+
+EOF
+ end
+
+ def command(id, state)
+ replication_admin.transit_peer_sync_replication_state(id, state)
+ end
+ end
+ end
+end
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index 7f2b6ae..a758c96 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -23,6 +23,9 @@ require 'hbase/hbase'
require 'hbase/table'
include HBaseConstants
+include Java
+
+java_import org.apache.hadoop.hbase.replication.SyncReplicationState
module Hbase
class ReplicationAdminTest < Test::Unit::TestCase
@@ -490,6 +493,27 @@ module Hbase
command(:remove_peer, @peer_id)
end
+ define_test "transit_peer_sync_replication_state: test" do
+ cluster_key = "server1.cie.com:2181:/hbase"
+ remote_wal_dir = "hdfs://srv1:9999/hbase"
+ args = { CLUSTER_KEY => cluster_key, REMOTE_WAL_DIR => remote_wal_dir }
+ command(:add_peer, @peer_id, args)
+
+ assert_equal(1, command(:list_peers).length)
+ peer = command(:list_peers).get(0)
+ assert_equal(@peer_id, peer.getPeerId)
+ assert_equal(SyncReplicationState::DOWNGRADE_ACTIVE, peer.getSyncReplicationState)
+
+ command(:transit_peer_sync_replication_state, @peer_id, 'ACTIVE')
+ assert_equal(1, command(:list_peers).length)
+ peer = command(:list_peers).get(0)
+ assert_equal(@peer_id, peer.getPeerId)
+ assert_equal(SyncReplicationState::ACTIVE, peer.getSyncReplicationState)
+
+ # cleanup for future tests
+ command(:remove_peer, @peer_id)
+ end
+
define_test "get_peer_config: works with simple clusterKey peer" do
cluster_key = "localhost:2181:/hbase-test"
args = { CLUSTER_KEY => cluster_key }
[03/11] hbase git commit: HBASE-19083 Introduce a new log writer
which can write to two HDFSes
Posted by zh...@apache.org.
HBASE-19083 Introduce a new log writer which can write to two HDFSes
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3e4b357
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3e4b357
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3e4b357
Branch: refs/heads/HBASE-19064
Commit: a3e4b357c855dba752e003379ed4445683e7a1a0
Parents: 32f235a
Author: zhangduo <zh...@apache.org>
Authored: Thu Jan 11 21:08:02 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Feb 10 17:47:53 2018 +0800
----------------------------------------------------------------------
.../hbase/regionserver/wal/AsyncFSWAL.java | 16 ++-
.../regionserver/wal/CombinedAsyncWriter.java | 134 ++++++++++++++++++
.../hbase/regionserver/wal/DualAsyncFSWAL.java | 67 +++++++++
.../wal/AbstractTestProtobufLog.java | 110 +++------------
.../regionserver/wal/ProtobufLogTestHelper.java | 99 ++++++++++++++
.../regionserver/wal/TestAsyncProtobufLog.java | 32 +----
.../wal/TestCombinedAsyncWriter.java | 136 +++++++++++++++++++
.../hbase/regionserver/wal/TestProtobufLog.java | 14 +-
.../regionserver/wal/WriterOverAsyncWriter.java | 63 +++++++++
9 files changed, 531 insertions(+), 140 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 19d89df..8e57441 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -606,12 +606,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
}
}
- @Override
- protected AsyncWriter createWriterInstance(Path path) throws IOException {
+ protected final AsyncWriter createAsyncWriter(FileSystem fs, Path path) throws IOException {
return AsyncFSWALProvider.createAsyncWriter(conf, fs, path, false, eventLoopGroup,
channelClass);
}
+ @Override
+ protected AsyncWriter createWriterInstance(Path path) throws IOException {
+ return createAsyncWriter(fs, path);
+ }
+
private void waitForSafePoint() {
consumeLock.lock();
try {
@@ -654,13 +658,13 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
} finally {
consumeLock.unlock();
}
- return executeClose(closeExecutor, oldWriter);
+ return executeClose(oldWriter);
}
@Override
protected void doShutdown() throws IOException {
waitForSafePoint();
- executeClose(closeExecutor, writer);
+ executeClose(writer);
closeExecutor.shutdown();
try {
if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) {
@@ -698,7 +702,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
}
}
- private static long executeClose(ExecutorService closeExecutor, AsyncWriter writer) {
+ protected final long executeClose(AsyncWriter writer) {
long fileLength;
if (writer != null) {
fileLength = writer.getLength();
@@ -706,7 +710,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
try {
writer.close();
} catch (IOException e) {
- LOG.warn("close old writer failed", e);
+ LOG.warn("close writer failed", e);
}
});
} else {
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
new file mode 100644
index 0000000..8ecfede
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+
+/**
+ * An {@link AsyncWriter} wrapper which writes data to a set of {@link AsyncWriter} instances.
+ */
+@InterfaceAudience.Private
+public abstract class CombinedAsyncWriter implements AsyncWriter {
+
+ private static final Logger LOG = LoggerFactory.getLogger(CombinedAsyncWriter.class);
+
+ protected final ImmutableList<AsyncWriter> writers;
+
+ protected CombinedAsyncWriter(ImmutableList<AsyncWriter> writers) {
+ this.writers = writers;
+ }
+
+ @Override
+ public long getLength() {
+ return writers.get(0).getLength();
+ }
+
+ @Override
+ public void close() throws IOException {
+ Exception error = null;
+ for (AsyncWriter writer : writers) {
+ try {
+ writer.close();
+ } catch (Exception e) {
+ LOG.warn("close writer failed", e);
+ if (error == null) {
+ error = e;
+ }
+ }
+ }
+ if (error != null) {
+ throw new IOException("Failed to close at least one writer, please see the warn log above. " +
+ "The cause is the first exception occurred", error);
+ }
+ }
+
+ protected abstract void doSync(CompletableFuture<Long> future);
+
+ @Override
+ public CompletableFuture<Long> sync() {
+ CompletableFuture<Long> future = new CompletableFuture<>();
+ doSync(future);
+ return future;
+ }
+
+ @Override
+ public void append(Entry entry) {
+ writers.forEach(w -> w.append(entry));
+ }
+
+ public enum Mode {
+ SEQUENTIAL, PARALLEL
+ }
+
+ public static CombinedAsyncWriter create(Mode mode, AsyncWriter writer, AsyncWriter... writers) {
+ ImmutableList<AsyncWriter> ws =
+ ImmutableList.<AsyncWriter> builder().add(writer).add(writers).build();
+ switch (mode) {
+ case SEQUENTIAL:
+ return new CombinedAsyncWriter(ws) {
+
+ private void doSync(CompletableFuture<Long> future, Long length, int index) {
+ if (index == writers.size()) {
+ future.complete(length);
+ return;
+ }
+ writers.get(index).sync().whenComplete((len, error) -> {
+ if (error != null) {
+ future.completeExceptionally(error);
+ return;
+ }
+ doSync(future, len, index + 1);
+ });
+ }
+
+ @Override
+ protected void doSync(CompletableFuture<Long> future) {
+ doSync(future, null, 0);
+ }
+ };
+ case PARALLEL:
+ return new CombinedAsyncWriter(ws) {
+
+ @Override
+ protected void doSync(CompletableFuture<Long> future) {
+ AtomicInteger remaining = new AtomicInteger(writers.size());
+ writers.forEach(w -> w.sync().whenComplete((length, error) -> {
+ if (error != null) {
+ future.completeExceptionally(error);
+ return;
+ }
+ if (remaining.decrementAndGet() == 0) {
+ future.complete(length);
+ }
+ }));
+ }
+ };
+ default:
+ throw new IllegalArgumentException("Unknown mode: " + mode);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
new file mode 100644
index 0000000..6bf9e02
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.io.netty.channel.Channel;
+import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
+
+/**
+ * An AsyncFSWAL which writes data to two filesystems.
+ */
+@InterfaceAudience.Private
+public class DualAsyncFSWAL extends AsyncFSWAL {
+
+ private final FileSystem remoteFs;
+
+ private final Path remoteWalDir;
+
+ public DualAsyncFSWAL(FileSystem fs, FileSystem remoteFs, Path rootDir, Path remoteRootDir,
+ String logDir, String archiveDir, Configuration conf, List<WALActionsListener> listeners,
+ boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup,
+ Class<? extends Channel> channelClass) throws FailedLogCloseException, IOException {
+ super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix,
+ eventLoopGroup, channelClass);
+ this.remoteFs = remoteFs;
+ this.remoteWalDir = new Path(remoteRootDir, logDir);
+ }
+
+ @Override
+ protected AsyncWriter createWriterInstance(Path path) throws IOException {
+ AsyncWriter localWriter = super.createWriterInstance(path);
+ AsyncWriter remoteWriter;
+ boolean succ = false;
+ try {
+ remoteWriter = createAsyncWriter(remoteFs, new Path(remoteWalDir, path.getName()));
+ succ = true;
+ } finally {
+ if (!succ) {
+ executeClose(localWriter);
+ }
+ }
+ return CombinedAsyncWriter.create(CombinedAsyncWriter.Mode.SEQUENTIAL, remoteWriter,
+ localWriter);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
index c3f3277..5098609 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
@@ -18,33 +18,15 @@
*/
package org.apache.hadoop.hbase.regionserver.wal;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.hbase.wal.WALKeyImpl;
+import org.apache.hadoop.hbase.wal.WALProvider;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@@ -56,8 +38,8 @@ import org.junit.rules.TestName;
/**
* WAL tests that can be reused across providers.
*/
-public abstract class AbstractTestProtobufLog<W extends Closeable> {
- protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+public abstract class AbstractTestProtobufLog {
+ protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
protected FileSystem fs;
protected Path dir;
@@ -93,14 +75,7 @@ public abstract class AbstractTestProtobufLog<W extends Closeable> {
TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);
// faster failover with cluster.shutdown();fs.close() idiom
- TEST_UTIL.getConfiguration()
- .setInt("hbase.ipc.client.connect.max.retries", 1);
- TEST_UTIL.getConfiguration().setInt(
- "dfs.client.block.recovery.retries", 1);
- TEST_UTIL.getConfiguration().setInt(
- "hbase.ipc.client.connection.maxidletime", 500);
- TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
- SampleRegionWALCoprocessor.class.getName());
+ TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1);
TEST_UTIL.startMiniDFSCluster(3);
}
@@ -131,77 +106,24 @@ public abstract class AbstractTestProtobufLog<W extends Closeable> {
* @throws IOException
*/
private void doRead(boolean withTrailer) throws IOException {
- final int columnCount = 5;
- final int recordCount = 5;
- final TableName tableName =
- TableName.valueOf("tablename");
- final byte[] row = Bytes.toBytes("row");
+ int columnCount = 5;
+ int recordCount = 5;
+ TableName tableName = TableName.valueOf("tablename");
+ byte[] row = Bytes.toBytes("row");
long timestamp = System.currentTimeMillis();
Path path = new Path(dir, "tempwal");
// delete the log if already exists, for test only
fs.delete(path, true);
- W writer = null;
- ProtobufLogReader reader = null;
- try {
- HRegionInfo hri = new HRegionInfo(tableName,
- HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
- HTableDescriptor htd = new HTableDescriptor(tableName);
- fs.mkdirs(dir);
- // Write log in pb format.
- writer = createWriter(path);
- for (int i = 0; i < recordCount; ++i) {
- WALKeyImpl key = new WALKeyImpl(
- hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
- WALEdit edit = new WALEdit();
- for (int j = 0; j < columnCount; ++j) {
- if (i == 0) {
- htd.addFamily(new HColumnDescriptor("column" + j));
- }
- String value = i + "" + j;
- edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
- }
- append(writer, new WAL.Entry(key, edit));
- }
- sync(writer);
- if (withTrailer) writer.close();
-
- // Now read the log using standard means.
- reader = (ProtobufLogReader) wals.createReader(fs, path);
- if (withTrailer) {
- assertNotNull(reader.trailer);
- } else {
- assertNull(reader.trailer);
- }
- for (int i = 0; i < recordCount; ++i) {
- WAL.Entry entry = reader.next();
- assertNotNull(entry);
- assertEquals(columnCount, entry.getEdit().size());
- assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
- assertEquals(tableName, entry.getKey().getTableName());
- int idx = 0;
- for (Cell val : entry.getEdit().getCells()) {
- assertTrue(Bytes.equals(row, 0, row.length, val.getRowArray(), val.getRowOffset(),
- val.getRowLength()));
- String value = i + "" + idx;
- assertArrayEquals(Bytes.toBytes(value), CellUtil.cloneValue(val));
- idx++;
- }
- }
- WAL.Entry entry = reader.next();
- assertNull(entry);
- } finally {
- if (writer != null) {
- writer.close();
- }
- if (reader != null) {
- reader.close();
+ fs.mkdirs(dir);
+ try (WALProvider.Writer writer = createWriter(path)) {
+ ProtobufLogTestHelper.doWrite(writer, withTrailer, tableName, columnCount, recordCount, row,
+ timestamp);
+ try (ProtobufLogReader reader = (ProtobufLogReader) wals.createReader(fs, path)) {
+ ProtobufLogTestHelper.doRead(reader, withTrailer, tableName, columnCount, recordCount, row,
+ timestamp);
}
}
}
- protected abstract W createWriter(Path path) throws IOException;
-
- protected abstract void append(W writer, WAL.Entry entry) throws IOException;
-
- protected abstract void sync(W writer) throws IOException;
+ protected abstract WALProvider.Writer createWriter(Path path) throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java
new file mode 100644
index 0000000..ecd8e6c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.stream.IntStream;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKeyImpl;
+import org.apache.hadoop.hbase.wal.WALProvider;
+
+/**
+ * Helper class for testing protobuf log.
+ */
+final class ProtobufLogTestHelper {
+
+ private ProtobufLogTestHelper() {
+ }
+
+ private static byte[] toValue(int prefix, int suffix) {
+ return Bytes.toBytes(prefix + "-" + suffix);
+ }
+
+ private static RegionInfo toRegionInfo(TableName tableName) {
+ return RegionInfoBuilder.newBuilder(tableName).setRegionId(1024).build();
+ }
+
+ public static void doWrite(WALProvider.Writer writer, boolean withTrailer, TableName tableName,
+ int columnCount, int recordCount, byte[] row, long timestamp) throws IOException {
+ RegionInfo hri = toRegionInfo(tableName);
+ for (int i = 0; i < recordCount; i++) {
+ WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, i, timestamp,
+ HConstants.DEFAULT_CLUSTER_ID);
+ WALEdit edit = new WALEdit();
+ int prefix = i;
+ IntStream.range(0, columnCount).mapToObj(j -> toValue(prefix, j))
+ .map(value -> new KeyValue(row, row, row, timestamp, value)).forEachOrdered(edit::add);
+ writer.append(new WAL.Entry(key, edit));
+ }
+ writer.sync();
+ if (withTrailer) {
+ writer.close();
+ }
+ }
+
+ public static void doRead(ProtobufLogReader reader, boolean withTrailer, TableName tableName,
+ int columnCount, int recordCount, byte[] row, long timestamp) throws IOException {
+ if (withTrailer) {
+ assertNotNull(reader.trailer);
+ } else {
+ assertNull(reader.trailer);
+ }
+ RegionInfo hri = toRegionInfo(tableName);
+ for (int i = 0; i < recordCount; ++i) {
+ WAL.Entry entry = reader.next();
+ assertNotNull(entry);
+ assertEquals(columnCount, entry.getEdit().size());
+ assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
+ assertEquals(tableName, entry.getKey().getTableName());
+ int idx = 0;
+ for (Cell val : entry.getEdit().getCells()) {
+ assertTrue(Bytes.equals(row, 0, row.length, val.getRowArray(), val.getRowOffset(),
+ val.getRowLength()));
+ assertArrayEquals(toValue(i, idx), CellUtil.cloneValue(val));
+ idx++;
+ }
+ }
+ assertNull(reader.next());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
index 0ea75b6..7626dcf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
@@ -18,29 +18,24 @@
package org.apache.hadoop.hbase.regionserver.wal;
import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.concurrent.ExecutionException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
-import org.apache.hadoop.hbase.wal.WALProvider;
-import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
+import org.apache.hadoop.hbase.wal.WALProvider.Writer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
@Category({ RegionServerTests.class, MediumTests.class })
-public class TestAsyncProtobufLog extends AbstractTestProtobufLog<WALProvider.AsyncWriter> {
+public class TestAsyncProtobufLog extends AbstractTestProtobufLog {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
@@ -64,25 +59,8 @@ public class TestAsyncProtobufLog extends AbstractTestProtobufLog<WALProvider.As
}
@Override
- protected AsyncWriter createWriter(Path path) throws IOException {
- return AsyncFSWALProvider.createAsyncWriter(TEST_UTIL.getConfiguration(), fs, path, false,
- EVENT_LOOP_GROUP.next(), CHANNEL_CLASS);
- }
-
- @Override
- protected void append(AsyncWriter writer, Entry entry) throws IOException {
- writer.append(entry);
- }
-
- @Override
- protected void sync(AsyncWriter writer) throws IOException {
- try {
- writer.sync().get();
- } catch (InterruptedException e) {
- throw new InterruptedIOException();
- } catch (ExecutionException e) {
- Throwables.propagateIfPossible(e.getCause());
- throw new IOException(e.getCause());
- }
+ protected Writer createWriter(Path path) throws IOException {
+ return new WriterOverAsyncWriter(AsyncFSWALProvider.createAsyncWriter(
+ TEST_UTIL.getConfiguration(), fs, path, false, EVENT_LOOP_GROUP.next(), CHANNEL_CLASS));
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
new file mode 100644
index 0000000..d74f9d8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
+import org.apache.hadoop.hbase.wal.AsyncFSWALProvider.AsyncWriter;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+
+import org.apache.hbase.thirdparty.io.netty.channel.Channel;
+import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
+import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
+import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
+
+@RunWith(Parameterized.class)
+@Category({ RegionServerTests.class, MediumTests.class })
+public class TestCombinedAsyncWriter {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static EventLoopGroup EVENT_LOOP_GROUP;
+
+ private static Class<? extends Channel> CHANNEL_CLASS;
+
+ private static WALFactory WALS;
+
+ @Rule
+ public final TestName name = new TestName();
+
+ @Parameter
+ public CombinedAsyncWriter.Mode mode;
+
+ @Parameters(name = "{index}: mode={0}")
+ public static List<Object[]> params() {
+ return Arrays.asList(new Object[] { CombinedAsyncWriter.Mode.SEQUENTIAL },
+ new Object[] { CombinedAsyncWriter.Mode.PARALLEL });
+ }
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ EVENT_LOOP_GROUP = new NioEventLoopGroup();
+ CHANNEL_CLASS = NioSocketChannel.class;
+ UTIL.startMiniDFSCluster(3);
+ UTIL.getTestFileSystem().mkdirs(UTIL.getDataTestDirOnTestFS());
+ WALS =
+ new WALFactory(UTIL.getConfiguration(), null, TestCombinedAsyncWriter.class.getSimpleName());
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ if (WALS != null) {
+ WALS.close();
+ }
+ EVENT_LOOP_GROUP.shutdownGracefully().syncUninterruptibly();
+ UTIL.shutdownMiniDFSCluster();
+ }
+
+ @Test
+ public void testWithTrailer() throws IOException {
+ doTest(true);
+ }
+
+ @Test
+ public void testWithoutTrailer() throws IOException {
+ doTest(false);
+ }
+
+ private Path getPath(int index) throws IOException {
+ String methodName = name.getMethodName().replaceAll("[^A-Za-z0-9_-]", "_");
+ return new Path(UTIL.getDataTestDirOnTestFS(), methodName + "-" + index);
+ }
+
+ private void doTest(boolean withTrailer) throws IOException {
+ int columnCount = 5;
+ int recordCount = 5;
+ TableName tableName = TableName.valueOf("tablename");
+ byte[] row = Bytes.toBytes("row");
+ long timestamp = System.currentTimeMillis();
+ Path path1 = getPath(1);
+ Path path2 = getPath(2);
+ FileSystem fs = UTIL.getTestFileSystem();
+ Configuration conf = UTIL.getConfiguration();
+ try (
+ AsyncWriter writer1 = AsyncFSWALProvider.createAsyncWriter(conf, fs, path1, false,
+ EVENT_LOOP_GROUP.next(), CHANNEL_CLASS);
+ AsyncWriter writer2 = AsyncFSWALProvider.createAsyncWriter(conf, fs, path2, false,
+ EVENT_LOOP_GROUP.next(), CHANNEL_CLASS);
+ CombinedAsyncWriter writer = CombinedAsyncWriter.create(mode, writer1, writer2)) {
+ ProtobufLogTestHelper.doWrite(new WriterOverAsyncWriter(writer), withTrailer, tableName,
+ columnCount, recordCount, row, timestamp);
+ try (ProtobufLogReader reader = (ProtobufLogReader) WALS.createReader(fs, path1)) {
+ ProtobufLogTestHelper.doRead(reader, withTrailer, tableName, columnCount, recordCount, row,
+ timestamp);
+ }
+ try (ProtobufLogReader reader = (ProtobufLogReader) WALS.createReader(fs, path2)) {
+ ProtobufLogTestHelper.doRead(reader, withTrailer, tableName, columnCount, recordCount, row,
+ timestamp);
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java
index 080b5be..d429a01 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java
@@ -23,14 +23,12 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.wal.FSHLogProvider;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
-import org.apache.hadoop.hbase.wal.WALProvider;
import org.apache.hadoop.hbase.wal.WALProvider.Writer;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;
@Category({ RegionServerTests.class, MediumTests.class })
-public class TestProtobufLog extends AbstractTestProtobufLog<WALProvider.Writer> {
+public class TestProtobufLog extends AbstractTestProtobufLog {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
@@ -40,14 +38,4 @@ public class TestProtobufLog extends AbstractTestProtobufLog<WALProvider.Writer>
protected Writer createWriter(Path path) throws IOException {
return FSHLogProvider.createWriter(TEST_UTIL.getConfiguration(), fs, path, false);
}
-
- @Override
- protected void append(Writer writer, Entry entry) throws IOException {
- writer.append(entry);
- }
-
- @Override
- protected void sync(Writer writer) throws IOException {
- writer.sync();
- }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e4b357/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WriterOverAsyncWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WriterOverAsyncWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WriterOverAsyncWriter.java
new file mode 100644
index 0000000..da70ee5
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WriterOverAsyncWriter.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.concurrent.ExecutionException;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALProvider;
+import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+
+class WriterOverAsyncWriter implements WALProvider.Writer {
+
+ private final WALProvider.AsyncWriter asyncWriter;
+
+ public WriterOverAsyncWriter(AsyncWriter asyncWriter) {
+ this.asyncWriter = asyncWriter;
+ }
+
+ @Override
+ public void close() throws IOException {
+ asyncWriter.close();
+ }
+
+ @Override
+ public long getLength() {
+ return asyncWriter.getLength();
+ }
+
+ @Override
+ public void sync() throws IOException {
+ try {
+ asyncWriter.sync().get();
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException();
+ } catch (ExecutionException e) {
+ Throwables.propagateIfPossible(e.getCause(), IOException.class);
+ throw new IOException(e.getCause());
+ }
+ }
+
+ @Override
+ public void append(Entry entry) throws IOException {
+ asyncWriter.append(entry);
+ }
+}
[10/11] hbase git commit: HBASE-19781 Add a new cluster state flag
for synchronous replication
Posted by zh...@apache.org.
HBASE-19781 Add a new cluster state flag for synchronous replication
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6fea1c97
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6fea1c97
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6fea1c97
Branch: refs/heads/HBASE-19064
Commit: 6fea1c976f81569eb4690f33d65151370cfd6b77
Parents: 1851e44
Author: Guanghao Zhang <zg...@apache.org>
Authored: Mon Jan 22 11:44:49 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Feb 10 17:49:13 2018 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/client/Admin.java | 39 +++++
.../apache/hadoop/hbase/client/AsyncAdmin.java | 31 ++++
.../hadoop/hbase/client/AsyncHBaseAdmin.java | 7 +
.../hbase/client/ConnectionImplementation.java | 9 ++
.../apache/hadoop/hbase/client/HBaseAdmin.java | 26 +++
.../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 15 ++
.../client/ShortCircuitMasterConnection.java | 9 ++
.../replication/ReplicationPeerConfigUtil.java | 26 +--
.../replication/ReplicationPeerDescription.java | 10 +-
.../hbase/replication/SyncReplicationState.java | 48 ++++++
.../hbase/shaded/protobuf/RequestConverter.java | 10 ++
.../src/main/protobuf/Master.proto | 4 +
.../src/main/protobuf/MasterProcedure.proto | 6 +-
.../src/main/protobuf/Replication.proto | 20 +++
.../replication/ReplicationPeerStorage.java | 18 ++-
.../hbase/replication/ReplicationUtils.java | 1 +
.../replication/ZKReplicationPeerStorage.java | 60 +++++--
.../replication/TestReplicationStateBasic.java | 23 ++-
.../TestZKReplicationPeerStorage.java | 12 +-
.../hbase/coprocessor/MasterObserver.java | 23 +++
.../org/apache/hadoop/hbase/master/HMaster.java | 12 ++
.../hbase/master/MasterCoprocessorHost.java | 21 +++
.../hadoop/hbase/master/MasterRpcServices.java | 17 ++
.../hadoop/hbase/master/MasterServices.java | 9 ++
.../procedure/PeerProcedureInterface.java | 2 +-
.../replication/ReplicationPeerManager.java | 51 +++++-
...ransitPeerSyncReplicationStateProcedure.java | 159 +++++++++++++++++++
.../hbase/security/access/AccessController.java | 8 +
.../replication/TestReplicationAdmin.java | 62 ++++++++
.../hbase/master/MockNoopMasterServices.java | 11 +-
.../cleaner/TestReplicationHFileCleaner.java | 4 +-
.../TestReplicationTrackerZKImpl.java | 6 +-
.../TestReplicationSourceManager.java | 3 +-
.../security/access/TestAccessController.java | 16 ++
.../hbase/util/TestHBaseFsckReplication.java | 5 +-
.../src/main/ruby/hbase/replication_admin.rb | 15 ++
hbase-shell/src/main/ruby/shell.rb | 1 +
.../src/main/ruby/shell/commands/list_peers.rb | 6 +-
.../transit_peer_sync_replication_state.rb | 44 +++++
.../test/ruby/hbase/replication_admin_test.rb | 24 +++
40 files changed, 818 insertions(+), 55 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index b8546fa..167d6f3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -2648,6 +2649,44 @@ public interface Admin extends Abortable, Closeable {
List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException;
/**
+ * Transit current cluster to a new state in a synchronous replication peer.
+ * @param peerId a short name that identifies the peer
+ * @param state a new state of current cluster
+ * @throws IOException if a remote or network exception occurs
+ */
+ void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ throws IOException;
+
+ /**
+ * Transit current cluster to a new state in a synchronous replication peer. But does not block
+ * and wait for it.
+ * <p>
+ * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+ * ExecutionException if there was an error while executing the operation or TimeoutException in
+ * case the wait timeout was not long enough to allow the operation to complete.
+ * @param peerId a short name that identifies the peer
+ * @param state a new state of current cluster
+ * @throws IOException if a remote or network exception occurs
+ */
+ Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
+ SyncReplicationState state) throws IOException;
+
+ /**
+ * Get the current cluster state in a synchronous replication peer.
+ * @param peerId a short name that identifies the peer
+ * @return the current cluster state
+ * @throws IOException if a remote or network exception occurs
+ */
+ default SyncReplicationState getReplicationPeerSyncReplicationState(String peerId)
+ throws IOException {
+ List<ReplicationPeerDescription> peers = listReplicationPeers(Pattern.compile(peerId));
+ if (peers.isEmpty() || !peers.get(0).getPeerId().equals(peerId)) {
+ throw new IOException("Replication peer " + peerId + " does not exist");
+ }
+ return peers.get(0).getSyncReplicationState();
+ }
+
+ /**
* Mark region server(s) as decommissioned to prevent additional regions from getting
* assigned to them. Optionally unload the regions on the servers. If there are multiple servers
* to be decommissioned, decommissioning them at the same time can prevent wasteful region
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 35cdd3f..895e7ff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
import com.google.protobuf.RpcChannel;
+import java.io.IOException;
import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -600,6 +602,35 @@ public interface AsyncAdmin {
ReplicationPeerConfig peerConfig);
/**
+ * Transit current cluster to a new state in a synchronous replication peer.
+ * @param peerId a short name that identifies the peer
+ * @param state a new state of current cluster
+ */
+ CompletableFuture<Void> transitReplicationPeerSyncReplicationState(String peerId,
+ SyncReplicationState state);
+
+ /**
+ * Get the current cluster state in a synchronous replication peer.
+ * @param peerId a short name that identifies the peer
+ * @return the current cluster state wrapped by a {@link CompletableFuture}.
+ */
+ default CompletableFuture<SyncReplicationState>
+ getReplicationPeerSyncReplicationState(String peerId) {
+ CompletableFuture<SyncReplicationState> future = new CompletableFuture<>();
+ listReplicationPeers(Pattern.compile(peerId)).whenComplete((peers, error) -> {
+ if (error != null) {
+ future.completeExceptionally(error);
+ } else if (peers.isEmpty() || !peers.get(0).getPeerId().equals(peerId)) {
+ future.completeExceptionally(
+ new IOException("Replication peer " + peerId + " does not exist"));
+ } else {
+ future.complete(peers.get(0).getSyncReplicationState());
+ }
+ });
+ return future;
+ }
+
+ /**
* Append the replicable table-cf config of the specified peer
* @param peerId a short name that identifies the cluster
* @param tableCfs A map from tableName to column family names
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 9b2390c..44771fd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -414,6 +415,12 @@ class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<Void> transitReplicationPeerSyncReplicationState(String peerId,
+ SyncReplicationState clusterState) {
+ return wrap(rawAdmin.transitReplicationPeerSyncReplicationState(peerId, clusterState));
+ }
+
+ @Override
public CompletableFuture<Void> appendReplicationPeerTableCFs(String peerId,
Map<TableName, List<String>> tableCfs) {
return wrap(rawAdmin.appendReplicationPeerTableCFs(peerId, tableCfs));
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 8807884..5407c6d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -123,6 +123,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListR
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
@@ -1724,6 +1726,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
MasterProtos.ClearDeadServersRequest request) throws ServiceException {
return stub.clearDeadServers(controller, request);
}
+
+ @Override
+ public TransitReplicationPeerSyncReplicationStateResponse
+ transitReplicationPeerSyncReplicationState(RpcController controller,
+ TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
+ return stub.transitReplicationPeerSyncReplicationState(controller, request);
+ }
};
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 8685984..c01b891 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -206,6 +207,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Disab
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
@@ -3991,6 +3993,30 @@ public class HBaseAdmin implements Admin {
}
@Override
+ public void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ throws IOException {
+ get(transitReplicationPeerSyncReplicationStateAsync(peerId, state), this.syncWaitTimeout,
+ TimeUnit.MILLISECONDS);
+ }
+
+ @Override
+ public Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
+ SyncReplicationState state) throws IOException {
+ TransitReplicationPeerSyncReplicationStateResponse response =
+ executeCallable(new MasterCallable<TransitReplicationPeerSyncReplicationStateResponse>(
+ getConnection(), getRpcControllerFactory()) {
+ @Override
+ protected TransitReplicationPeerSyncReplicationStateResponse rpcCall() throws Exception {
+ return master.transitReplicationPeerSyncReplicationState(getRpcController(),
+ RequestConverter.buildTransitReplicationPeerSyncReplicationStateRequest(peerId,
+ state));
+ }
+ });
+ return new ReplicationFuture(this, peerId, response.getProcId(),
+ () -> "TRANSIT_REPLICATION_PEER_SYNCHRONOUS_REPLICATION_STATE");
+ }
+
+ @Override
public void appendReplicationPeerTableCFs(String id,
Map<TableName, List<String>> tableCfs)
throws ReplicationException, IOException {
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 050bfe2..30a372d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -255,6 +256,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListR
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
@@ -1613,6 +1616,18 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<Void> transitReplicationPeerSyncReplicationState(String peerId,
+ SyncReplicationState clusterState) {
+ return this
+ .<TransitReplicationPeerSyncReplicationStateRequest, TransitReplicationPeerSyncReplicationStateResponse> procedureCall(
+ RequestConverter.buildTransitReplicationPeerSyncReplicationStateRequest(peerId,
+ clusterState),
+ (s, c, req, done) -> s.transitReplicationPeerSyncReplicationState(c, req, done),
+ (resp) -> resp.getProcId(), new ReplicationProcedureBiConsumer(peerId,
+ () -> "TRANSIT_REPLICATION_PEER_SYNCHRONOUS_REPLICATION_STATE"));
+ }
+
+ @Override
public CompletableFuture<Void> appendReplicationPeerTableCFs(String id,
Map<TableName, List<String>> tableCfs) {
if (tableCfs == null) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
index 50690b4..7bb65d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
@@ -166,6 +166,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListR
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
@@ -638,4 +640,11 @@ public class ShortCircuitMasterConnection implements MasterKeepAliveConnection {
throws ServiceException {
return stub.splitRegion(controller, request);
}
+
+ @Override
+ public TransitReplicationPeerSyncReplicationStateResponse
+ transitReplicationPeerSyncReplicationState(RpcController controller,
+ TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
+ return stub.transitReplicationPeerSyncReplicationState(controller, request);
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 642149b..86b49ea 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@@ -391,25 +392,28 @@ public final class ReplicationPeerConfigUtil {
return ProtobufUtil.prependPBMagic(bytes);
}
- public static ReplicationPeerDescription toReplicationPeerDescription(
- ReplicationProtos.ReplicationPeerDescription desc) {
- boolean enabled = ReplicationProtos.ReplicationState.State.ENABLED == desc.getState()
- .getState();
+ public static ReplicationPeerDescription
+ toReplicationPeerDescription(ReplicationProtos.ReplicationPeerDescription desc) {
+ boolean enabled =
+ ReplicationProtos.ReplicationState.State.ENABLED == desc.getState().getState();
ReplicationPeerConfig config = convert(desc.getConfig());
- return new ReplicationPeerDescription(desc.getId(), enabled, config);
+ return new ReplicationPeerDescription(desc.getId(), enabled, config,
+ SyncReplicationState.valueOf(desc.getSyncReplicationState().getNumber()));
}
- public static ReplicationProtos.ReplicationPeerDescription toProtoReplicationPeerDescription(
- ReplicationPeerDescription desc) {
+ public static ReplicationProtos.ReplicationPeerDescription
+ toProtoReplicationPeerDescription(ReplicationPeerDescription desc) {
ReplicationProtos.ReplicationPeerDescription.Builder builder =
ReplicationProtos.ReplicationPeerDescription.newBuilder();
builder.setId(desc.getPeerId());
- ReplicationProtos.ReplicationState.Builder stateBuilder = ReplicationProtos.ReplicationState
- .newBuilder();
- stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED
- : ReplicationProtos.ReplicationState.State.DISABLED);
+ ReplicationProtos.ReplicationState.Builder stateBuilder =
+ ReplicationProtos.ReplicationState.newBuilder();
+ stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED :
+ ReplicationProtos.ReplicationState.State.DISABLED);
builder.setState(stateBuilder.build());
builder.setConfig(convert(desc.getPeerConfig()));
+ builder.setSyncReplicationState(
+ ReplicationProtos.SyncReplicationState.forNumber(desc.getSyncReplicationState().ordinal()));
return builder.build();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
index ba97d07..2d077c5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -28,11 +28,14 @@ public class ReplicationPeerDescription {
private final String id;
private final boolean enabled;
private final ReplicationPeerConfig config;
+ private final SyncReplicationState syncReplicationState;
- public ReplicationPeerDescription(String id, boolean enabled, ReplicationPeerConfig config) {
+ public ReplicationPeerDescription(String id, boolean enabled, ReplicationPeerConfig config,
+ SyncReplicationState syncReplicationState) {
this.id = id;
this.enabled = enabled;
this.config = config;
+ this.syncReplicationState = syncReplicationState;
}
public String getPeerId() {
@@ -47,11 +50,16 @@ public class ReplicationPeerDescription {
return this.config;
}
+ public SyncReplicationState getSyncReplicationState() {
+ return this.syncReplicationState;
+ }
+
@Override
public String toString() {
StringBuilder builder = new StringBuilder("id : ").append(id);
builder.append(", enabled : " + enabled);
builder.append(", config : " + config);
+ builder.append(", syncReplicationState : " + syncReplicationState);
return builder.toString();
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
new file mode 100644
index 0000000..bd144e9
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Used by synchronous replication. Indicate the state of the current cluster in a synchronous
+ * replication peer. The state may be one of {@link SyncReplicationState#ACTIVE},
+ * {@link SyncReplicationState#DOWNGRADE_ACTIVE} or
+ * {@link SyncReplicationState#STANDBY}.
+ * <p>
+ * For asynchronous replication, the state is {@link SyncReplicationState#NONE}.
+ */
+@InterfaceAudience.Public
+public enum SyncReplicationState {
+ NONE, ACTIVE, DOWNGRADE_ACTIVE, STANDBY;
+
+ public static SyncReplicationState valueOf(int value) {
+ switch (value) {
+ case 0:
+ return NONE;
+ case 1:
+ return ACTIVE;
+ case 2:
+ return DOWNGRADE_ACTIVE;
+ case 3:
+ return STANDBY;
+ default:
+ throw new IllegalArgumentException("Unknown synchronous replication state " + value);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 8ac7058..659be2a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
@@ -146,6 +147,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Enabl
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
/**
@@ -1874,4 +1876,12 @@ public final class RequestConverter {
}
return pbServers;
}
+
+ public static TransitReplicationPeerSyncReplicationStateRequest
+ buildTransitReplicationPeerSyncReplicationStateRequest(String peerId,
+ SyncReplicationState state) {
+ return TransitReplicationPeerSyncReplicationStateRequest.newBuilder().setPeerId(peerId)
+ .setSyncReplicationState(ReplicationProtos.SyncReplicationState.forNumber(state.ordinal()))
+ .build();
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 3a236c0..c2ab180 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -962,6 +962,10 @@ service MasterService {
rpc ListReplicationPeers(ListReplicationPeersRequest)
returns(ListReplicationPeersResponse);
+ /** Transit the state of current cluster in a synchronous replication peer */
+ rpc TransitReplicationPeerSyncReplicationState(TransitReplicationPeerSyncReplicationStateRequest)
+ returns(TransitReplicationPeerSyncReplicationStateResponse);
+
/** Returns a list of ServerNames marked as decommissioned. */
rpc ListDecommissionedRegionServers(ListDecommissionedRegionServersRequest)
returns(ListDecommissionedRegionServersResponse);
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 83099c3..1dffd33 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -409,4 +409,8 @@ message AddPeerStateData {
message UpdatePeerConfigStateData {
required ReplicationPeer peer_config = 1;
-}
\ No newline at end of file
+}
+
+message TransitPeerSyncReplicationStateStateData {
+ required SyncReplicationState syncReplicationState = 1;
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-protocol-shaded/src/main/protobuf/Replication.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
index 44295d8..de7b742 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Replication.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -63,12 +63,23 @@ message ReplicationState {
}
/**
+ * Indicate the state of the current cluster in a synchronous replication peer.
+ */
+enum SyncReplicationState {
+ NONE = 0;
+ ACTIVE = 1;
+ DOWNGRADE_ACTIVE = 2;
+ STANDBY = 3;
+}
+
+/**
* Used by replication. Description of the replication peer.
*/
message ReplicationPeerDescription {
required string id = 1;
required ReplicationState state = 2;
required ReplicationPeer config = 3;
+ optional SyncReplicationState syncReplicationState = 4;
}
/**
@@ -137,3 +148,12 @@ message ListReplicationPeersRequest {
message ListReplicationPeersResponse {
repeated ReplicationPeerDescription peer_desc = 1;
}
+
+message TransitReplicationPeerSyncReplicationStateRequest {
+ required string peer_id = 1;
+ required SyncReplicationState syncReplicationState = 2;
+}
+
+message TransitReplicationPeerSyncReplicationStateResponse {
+ required uint64 proc_id = 1;
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
index 1adda02..d2538ab 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -31,8 +31,8 @@ public interface ReplicationPeerStorage {
* Add a replication peer.
* @throws ReplicationException if there are errors accessing the storage service.
*/
- void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
- throws ReplicationException;
+ void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled,
+ SyncReplicationState syncReplicationState) throws ReplicationException;
/**
* Remove a replication peer.
@@ -70,4 +70,18 @@ public interface ReplicationPeerStorage {
* @throws ReplicationException if there are errors accessing the storage service.
*/
ReplicationPeerConfig getPeerConfig(String peerId) throws ReplicationException;
+
+ /**
+ * Set the state of current cluster in a synchronous replication peer.
+ * @throws ReplicationException if there are errors accessing the storage service.
+ */
+ void setPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ throws ReplicationException;
+
+ /**
+ * Get the state of current cluster in a synchronous replication peer.
+ * @throws ReplicationException if there are errors accessing the storage service.
+ */
+ SyncReplicationState getPeerSyncReplicationState(String peerId)
+ throws ReplicationException;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 11507aa..d633be9 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -22,6 +22,7 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.HBaseConfiguration;
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
index a53500a..338ce3f 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java
@@ -22,6 +22,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
@@ -51,6 +52,8 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
public static final byte[] DISABLED_ZNODE_BYTES =
toByteArray(ReplicationProtos.ReplicationState.State.DISABLED);
+ public static final String SYNCHRONOUS_REPLICATION_STATE_ZNODE = "sync-rep-state";
+
/**
* The name of the znode that contains the replication status of a remote slave (i.e. peer)
* cluster.
@@ -79,21 +82,29 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
return ZNodePaths.joinZNode(peersZNode, peerId);
}
+ @VisibleForTesting
+ public String getSyncReplicationStateNode(String peerId) {
+ return ZNodePaths.joinZNode(getPeerNode(peerId), SYNCHRONOUS_REPLICATION_STATE_ZNODE);
+ }
+
@Override
- public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
- throws ReplicationException {
+ public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled,
+ SyncReplicationState syncReplicationState) throws ReplicationException {
+ List<ZKUtilOp> multiOps = Arrays.asList(
+ ZKUtilOp.createAndFailSilent(getPeerNode(peerId),
+ ReplicationPeerConfigUtil.toByteArray(peerConfig)),
+ ZKUtilOp.createAndFailSilent(getPeerStateNode(peerId),
+ enabled ? ENABLED_ZNODE_BYTES : DISABLED_ZNODE_BYTES),
+ ZKUtilOp.createAndFailSilent(getSyncReplicationStateNode(peerId),
+ Bytes.toBytes(syncReplicationState.ordinal())));
try {
ZKUtil.createWithParents(zookeeper, peersZNode);
- ZKUtil.multiOrSequential(zookeeper,
- Arrays.asList(
- ZKUtilOp.createAndFailSilent(getPeerNode(peerId),
- ReplicationPeerConfigUtil.toByteArray(peerConfig)),
- ZKUtilOp.createAndFailSilent(getPeerStateNode(peerId),
- enabled ? ENABLED_ZNODE_BYTES : DISABLED_ZNODE_BYTES)),
- false);
+ ZKUtil.multiOrSequential(zookeeper, multiOps, false);
} catch (KeeperException e) {
- throw new ReplicationException("Could not add peer with id=" + peerId + ", peerConfif=>"
- + peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED"), e);
+ throw new ReplicationException(
+ "Could not add peer with id=" + peerId + ", peerConfig=>" + peerConfig + ", state=" +
+ (enabled ? "ENABLED" : "DISABLED") + ", syncReplicationState=" + syncReplicationState,
+ e);
}
}
@@ -166,4 +177,31 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase
"Failed to parse replication peer config for peer with id=" + peerId, e);
}
}
+
+ @Override
+ public void setPeerSyncReplicationState(String peerId, SyncReplicationState clusterState)
+ throws ReplicationException {
+ byte[] clusterStateBytes = Bytes.toBytes(clusterState.ordinal());
+ try {
+ ZKUtil.setData(zookeeper, getSyncReplicationStateNode(peerId), clusterStateBytes);
+ } catch (KeeperException e) {
+ throw new ReplicationException(
+ "Unable to change the cluster state for the synchronous replication peer with id=" +
+ peerId,
+ e);
+ }
+ }
+
+ @Override
+ public SyncReplicationState getPeerSyncReplicationState(String peerId)
+ throws ReplicationException {
+ byte[] data;
+ try {
+ data = ZKUtil.getData(zookeeper, getSyncReplicationStateNode(peerId));
+ } catch (KeeperException | InterruptedException e) {
+ throw new ReplicationException(
+ "Error getting cluster state for the synchronous replication peer with id=" + peerId, e);
+ }
+ return SyncReplicationState.valueOf(Bytes.toInt(data));
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
index fccffb5..fe658a3 100644
--- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
+++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
@@ -160,7 +160,8 @@ public abstract class TestReplicationStateBasic {
assertTrue(rqs.getReplicableHFiles(ID_ONE).isEmpty());
assertEquals(0, rqs.getAllPeersFromHFileRefsQueue().size());
rp.getPeerStorage().addPeer(ID_ONE,
- ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true);
+ ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true,
+ SyncReplicationState.NONE);
rqs.addPeerToHFileRefs(ID_ONE);
rqs.addHFileRefs(ID_ONE, files1);
assertEquals(1, rqs.getAllPeersFromHFileRefsQueue().size());
@@ -183,10 +184,12 @@ public abstract class TestReplicationStateBasic {
public void testRemovePeerForHFileRefs() throws ReplicationException, KeeperException {
rp.init();
rp.getPeerStorage().addPeer(ID_ONE,
- ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true);
+ ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true,
+ SyncReplicationState.NONE);
rqs.addPeerToHFileRefs(ID_ONE);
rp.getPeerStorage().addPeer(ID_TWO,
- ReplicationPeerConfig.newBuilder().setClusterKey(KEY_TWO).build(), true);
+ ReplicationPeerConfig.newBuilder().setClusterKey(KEY_TWO).build(), true,
+ SyncReplicationState.NONE);
rqs.addPeerToHFileRefs(ID_TWO);
List<Pair<Path, Path>> files1 = new ArrayList<>(3);
@@ -235,9 +238,13 @@ public abstract class TestReplicationStateBasic {
assertNumberOfPeers(0);
// Add some peers
- rp.getPeerStorage().addPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE), true);
+ rp.getPeerStorage().addPeer(ID_ONE,
+ ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true,
+ SyncReplicationState.NONE);
assertNumberOfPeers(1);
- rp.getPeerStorage().addPeer(ID_TWO, new ReplicationPeerConfig().setClusterKey(KEY_TWO), true);
+ rp.getPeerStorage().addPeer(ID_TWO,
+ ReplicationPeerConfig.newBuilder().setClusterKey(KEY_TWO).build(), true,
+ SyncReplicationState.NONE);
assertNumberOfPeers(2);
assertEquals(KEY_ONE, ZKConfig.getZooKeeperClusterKey(ReplicationUtils
@@ -247,7 +254,9 @@ public abstract class TestReplicationStateBasic {
assertNumberOfPeers(1);
// Add one peer
- rp.getPeerStorage().addPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE), true);
+ rp.getPeerStorage().addPeer(ID_ONE,
+ ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true,
+ SyncReplicationState.NONE);
rp.addPeer(ID_ONE);
assertNumberOfPeers(2);
assertTrue(rp.getPeer(ID_ONE).isPeerEnabled());
@@ -311,7 +320,7 @@ public abstract class TestReplicationStateBasic {
// Add peers for the corresponding queues so they are not orphans
rp.getPeerStorage().addPeer("qId" + i,
ReplicationPeerConfig.newBuilder().setClusterKey("localhost:2818:/bogus" + i).build(),
- true);
+ true, SyncReplicationState.NONE);
}
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
index 3290fb0..1258695 100644
--- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
+++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
@@ -87,8 +87,9 @@ public class TestZKReplicationPeerStorage {
Random rand = new Random(seed);
return ReplicationPeerConfig.newBuilder().setClusterKey(Long.toHexString(rand.nextLong()))
.setReplicationEndpointImpl(Long.toHexString(rand.nextLong()))
- .setNamespaces(randNamespaces(rand)).setExcludeNamespaces(randNamespaces(rand))
- .setTableCFsMap(randTableCFs(rand)).setReplicateAllUserTables(rand.nextBoolean())
+ .setRemoteWALDir(Long.toHexString(rand.nextLong())).setNamespaces(randNamespaces(rand))
+ .setExcludeNamespaces(randNamespaces(rand)).setTableCFsMap(randTableCFs(rand))
+ .setExcludeTableCFsMap(randTableCFs(rand)).setReplicateAllUserTables(rand.nextBoolean())
.setBandwidth(rand.nextInt(1000)).build();
}
@@ -139,7 +140,8 @@ public class TestZKReplicationPeerStorage {
public void test() throws ReplicationException {
int peerCount = 10;
for (int i = 0; i < peerCount; i++) {
- STORAGE.addPeer(Integer.toString(i), getConfig(i), i % 2 == 0);
+ STORAGE.addPeer(Integer.toString(i), getConfig(i), i % 2 == 0,
+ SyncReplicationState.valueOf(i % 4));
}
List<String> peerIds = STORAGE.listPeerIds();
assertEquals(peerCount, peerIds.size());
@@ -163,6 +165,10 @@ public class TestZKReplicationPeerStorage {
for (int i = 0; i < peerCount; i++) {
assertEquals(i % 2 != 0, STORAGE.isPeerEnabled(Integer.toString(i)));
}
+ for (int i = 0; i < peerCount; i++) {
+ assertEquals(SyncReplicationState.valueOf(i % 4),
+ STORAGE.getPeerSyncReplicationState(Integer.toString(i)));
+ }
String toRemove = Integer.toString(peerCount / 2);
STORAGE.removePeer(toRemove);
peerIds = STORAGE.listPeerIds();
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index a17bc9f..8d2b55f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@@ -1232,6 +1233,28 @@ public interface MasterObserver {
String regex) throws IOException {}
/**
+ * Called before transiting the current cluster state for the specified synchronous replication peer
+ * @param ctx the environment to interact with the framework and master
+ * @param peerId a short name that identifies the peer
+ * @param state a new state
+ */
+ default void preTransitReplicationPeerSyncReplicationState(
+ final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
+ SyncReplicationState state) throws IOException {
+ }
+
+ /**
+ * Called after transiting the current cluster state for the specified synchronous replication peer
+ * @param ctx the environment to interact with the framework and master
+ * @param peerId a short name that identifies the peer
+ * @param state a new state
+ */
+ default void postTransitReplicationPeerSyncReplicationState(
+ final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
+ SyncReplicationState state) throws IOException {
+ }
+
+ /**
* Called before new LockProcedure is queued.
* @param ctx the environment to interact with the framework and master
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ecf6d63..ddffbec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -135,6 +135,7 @@ import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
import org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure;
import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
+import org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants;
@@ -169,6 +170,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
@@ -3416,6 +3418,16 @@ public class HMaster extends HRegionServer implements MasterServices {
return peers;
}
+ @Override
+ public long transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ throws ReplicationException, IOException {
+ LOG.info(
+ getClientIdAuditPrefix() +
+ " transit current cluster state to {} in a synchronous replication peer id={}",
+ state, peerId);
+ return executePeerProcedure(new TransitPeerSyncReplicationStateProcedure(peerId, state));
+ }
+
/**
* Mark region server(s) as decommissioned (previously called 'draining') to prevent additional
* regions from getting assigned to them. Also unload the regions on the servers asynchronously.
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 7f17227..ba5e70a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.security.User;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -1524,6 +1525,26 @@ public class MasterCoprocessorHost
});
}
+ public void preTransitReplicationPeerSyncReplicationState(final String peerId,
+ final SyncReplicationState clusterState) throws IOException {
+ execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+ @Override
+ public void call(MasterObserver observer) throws IOException {
+ observer.preTransitReplicationPeerSyncReplicationState(this, peerId, clusterState);
+ }
+ });
+ }
+
+ public void postTransitReplicationPeerSyncReplicationState(final String peerId,
+ final SyncReplicationState clusterState) throws IOException {
+ execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+ @Override
+ public void call(MasterObserver observer) throws IOException {
+ observer.postTransitReplicationPeerSyncReplicationState(this, peerId, clusterState);
+ }
+ });
+ }
+
public void preRequestLock(String namespace, TableName tableName, RegionInfo[] regionInfos,
LockType type, String description) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 377a9c6..f5bf117 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AccessController;
@@ -285,6 +286,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListR
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@@ -1958,6 +1961,20 @@ public class MasterRpcServices extends RSRpcServices
}
@Override
+ public TransitReplicationPeerSyncReplicationStateResponse
+ transitReplicationPeerSyncReplicationState(RpcController controller,
+ TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException {
+ try {
+ long procId = master.transitReplicationPeerSyncReplicationState(request.getPeerId(),
+ SyncReplicationState.valueOf(request.getSyncReplicationState().getNumber()));
+ return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId)
+ .build();
+ } catch (ReplicationException | IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
ListReplicationPeersRequest request) throws ServiceException {
ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 9d371bd..5c6f2dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -480,6 +481,14 @@ public interface MasterServices extends Server {
IOException;
/**
+ * Set current cluster state for a synchronous replication peer.
+ * @param peerId a short name that identifies the peer
+ * @param clusterState state of current cluster
+ */
+ long transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState clusterState)
+ throws ReplicationException, IOException;
+
+ /**
* @return {@link LockManager} to lock namespaces/tables/regions.
*/
LockManager getLockManager();
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java
index 4abc9ad..fc5348e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java
@@ -25,7 +25,7 @@ import org.apache.yetus.audience.InterfaceStability;
public interface PeerProcedureInterface {
enum PeerOperationType {
- ADD, REMOVE, ENABLE, DISABLE, UPDATE_CONFIG, REFRESH
+ ADD, REMOVE, ENABLE, DISABLE, UPDATE_CONFIG, REFRESH, TRANSIT_SYNC_REPLICATION_STATE
}
String getPeerId();
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index d715e2e..9336fbd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.master.replication;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.EnumMap;
+import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
@@ -61,6 +64,16 @@ public class ReplicationPeerManager {
private final ConcurrentMap<String, ReplicationPeerDescription> peers;
+ private final EnumMap<SyncReplicationState, EnumSet<SyncReplicationState>> allowedTransition =
+ new EnumMap<SyncReplicationState, EnumSet<SyncReplicationState>>(SyncReplicationState.class) {
+ {
+ put(SyncReplicationState.ACTIVE, EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE));
+ put(SyncReplicationState.STANDBY, EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE));
+ put(SyncReplicationState.DOWNGRADE_ACTIVE,
+ EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE));
+ }
+ };
+
ReplicationPeerManager(ReplicationPeerStorage peerStorage, ReplicationQueueStorage queueStorage,
ConcurrentMap<String, ReplicationPeerDescription> peers) {
this.peerStorage = peerStorage;
@@ -163,6 +176,17 @@ public class ReplicationPeerManager {
}
}
+ public void preTransitPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ throws DoNotRetryIOException {
+ ReplicationPeerDescription desc = checkPeerExists(peerId);
+ SyncReplicationState fromState = desc.getSyncReplicationState();
+ EnumSet<SyncReplicationState> allowedToStates = allowedTransition.get(fromState);
+ if (allowedToStates == null || !allowedToStates.contains(state)) {
+ throw new DoNotRetryIOException("Can not transit current cluster state from " + fromState +
+ " to " + state + " for peer id=" + peerId);
+ }
+ }
+
public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
throws ReplicationException {
if (peers.containsKey(peerId)) {
@@ -170,8 +194,12 @@ public class ReplicationPeerManager {
return;
}
ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build();
- peerStorage.addPeer(peerId, copiedPeerConfig, enabled);
- peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig));
+ SyncReplicationState syncReplicationState =
+ StringUtils.isBlank(peerConfig.getRemoteWALDir()) ? SyncReplicationState.NONE
+ : SyncReplicationState.DOWNGRADE_ACTIVE;
+ peerStorage.addPeer(peerId, copiedPeerConfig, enabled, syncReplicationState);
+ peers.put(peerId,
+ new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig, syncReplicationState));
}
public void removePeer(String peerId) throws ReplicationException {
@@ -190,7 +218,8 @@ public class ReplicationPeerManager {
return;
}
peerStorage.setPeerState(peerId, enabled);
- peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, desc.getPeerConfig()));
+ peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, desc.getPeerConfig(),
+ desc.getSyncReplicationState()));
}
public void enablePeer(String peerId) throws ReplicationException {
@@ -215,7 +244,8 @@ public class ReplicationPeerManager {
newPeerConfigBuilder.putAllConfiguration(peerConfig.getConfiguration());
ReplicationPeerConfig newPeerConfig = newPeerConfigBuilder.build();
peerStorage.updatePeerConfig(peerId, newPeerConfig);
- peers.put(peerId, new ReplicationPeerDescription(peerId, desc.isEnabled(), newPeerConfig));
+ peers.put(peerId, new ReplicationPeerDescription(peerId, desc.isEnabled(), newPeerConfig,
+ desc.getSyncReplicationState()));
}
public List<ReplicationPeerDescription> listPeers(Pattern pattern) {
@@ -231,6 +261,14 @@ public class ReplicationPeerManager {
return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty();
}
+ public void transitPeerSyncReplicationState(String peerId, SyncReplicationState state)
+ throws ReplicationException {
+ ReplicationPeerDescription desc = peers.get(peerId);
+ peerStorage.setPeerSyncReplicationState(peerId, state);
+ peers.put(peerId,
+ new ReplicationPeerDescription(peerId, desc.isEnabled(), desc.getPeerConfig(), state));
+ }
+
public void removeAllQueuesAndHFileRefs(String peerId) throws ReplicationException {
// Here we need two passes to address the problem of claimQueue. Maybe a claimQueue is still
// on-going when the refresh peer config procedure is done, if a RS which has already been
@@ -350,10 +388,11 @@ public class ReplicationPeerManager {
for (String peerId : peerStorage.listPeerIds()) {
ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId);
boolean enabled = peerStorage.isPeerEnabled(peerId);
- peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, peerConfig));
+ SyncReplicationState state = peerStorage.getPeerSyncReplicationState(peerId);
+ peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, peerConfig, state));
}
return new ReplicationPeerManager(peerStorage,
- ReplicationStorageFactory.getReplicationQueueStorage(zk, conf), peers);
+ ReplicationStorageFactory.getReplicationQueueStorage(zk, conf), peers);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
new file mode 100644
index 0000000..d26eecc
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TransitPeerSyncReplicationStateStateData;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+
+/**
+ * The procedure for transit current cluster state for a synchronous replication peer.
+ */
+@InterfaceAudience.Private
+public class TransitPeerSyncReplicationStateProcedure extends ModifyPeerProcedure {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TransitPeerSyncReplicationStateProcedure.class);
+
+ private SyncReplicationState state;
+
+ public TransitPeerSyncReplicationStateProcedure() {
+ }
+
+ public TransitPeerSyncReplicationStateProcedure(String peerId, SyncReplicationState state) {
+ super(peerId);
+ this.state = state;
+ }
+
+ @Override
+ public PeerOperationType getPeerOperationType() {
+ return PeerOperationType.TRANSIT_SYNC_REPLICATION_STATE;
+ }
+
+ @Override
+ protected void prePeerModification(MasterProcedureEnv env)
+ throws IOException, ReplicationException {
+ MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ cpHost.preTransitReplicationPeerSyncReplicationState(peerId, state);
+ }
+ env.getReplicationPeerManager().preTransitPeerSyncReplicationState(peerId, state);
+ }
+
+ @Override
+ protected void updatePeerStorage(MasterProcedureEnv env) throws ReplicationException {
+ env.getReplicationPeerManager().transitPeerSyncReplicationState(peerId, state);
+ }
+
+ @Override
+ protected void postPeerModification(MasterProcedureEnv env)
+ throws IOException, ReplicationException {
+ LOG.info("Successfully transit current cluster state to {} in synchronous replication peer {}",
+ state, peerId);
+ MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ env.getMasterCoprocessorHost().postTransitReplicationPeerSyncReplicationState(peerId, state);
+ }
+ }
+
+ @Override
+ protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
+ super.serializeStateData(serializer);
+ serializer.serialize(TransitPeerSyncReplicationStateStateData.newBuilder()
+ .setSyncReplicationState(ReplicationProtos.SyncReplicationState.forNumber(state.ordinal()))
+ .build());
+ }
+
+ @Override
+ protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
+ super.deserializeStateData(serializer);
+ TransitPeerSyncReplicationStateStateData data =
+ serializer.deserialize(TransitPeerSyncReplicationStateStateData.class);
+ state = SyncReplicationState.valueOf(data.getSyncReplicationState().getNumber());
+ }
+
+ @Override
+ protected Flow executeFromState(MasterProcedureEnv env, PeerModificationState state)
+ throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
+ switch (state) {
+ case PRE_PEER_MODIFICATION:
+ try {
+ prePeerModification(env);
+ } catch (IOException e) {
+ LOG.warn("{} failed to call pre CP hook or the pre check is failed for peer {}, " +
+ "mark the procedure as failure and give up", getClass().getName(), peerId, e);
+ setFailure("master-" + getPeerOperationType().name().toLowerCase() + "-peer", e);
+ releaseLatch();
+ return Flow.NO_MORE_STATE;
+ } catch (ReplicationException e) {
+ LOG.warn("{} failed to call prePeerModification for peer {}, retry", getClass().getName(),
+ peerId, e);
+ throw new ProcedureYieldException();
+ }
+ setNextState(PeerModificationState.UPDATE_PEER_STORAGE);
+ return Flow.HAS_MORE_STATE;
+ case UPDATE_PEER_STORAGE:
+ try {
+ updatePeerStorage(env);
+ } catch (ReplicationException e) {
+ LOG.warn("{} update peer storage for peer {} failed, retry", getClass().getName(), peerId,
+ e);
+ throw new ProcedureYieldException();
+ }
+ setNextState(PeerModificationState.REFRESH_PEER_ON_RS);
+ return Flow.HAS_MORE_STATE;
+ case REFRESH_PEER_ON_RS:
+ // TODO: Need add child procedure for every RegionServer
+ setNextState(PeerModificationState.POST_PEER_MODIFICATION);
+ return Flow.HAS_MORE_STATE;
+ case POST_PEER_MODIFICATION:
+ try {
+ postPeerModification(env);
+ } catch (ReplicationException e) {
+ LOG.warn("{} failed to call postPeerModification for peer {}, retry",
+ getClass().getName(), peerId, e);
+ throw new ProcedureYieldException();
+ } catch (IOException e) {
+ LOG.warn("{} failed to call post CP hook for peer {}, " +
+ "ignore since the procedure has already done", getClass().getName(), peerId, e);
+ }
+ releaseLatch();
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ }
+
+ private void releaseLatch() {
+ ProcedurePrepareLatch.releaseLatch(latch, this);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 1fbf01d..decfb4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -112,6 +112,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTrack
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
@@ -2501,6 +2502,13 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
}
@Override
+ public void preTransitReplicationPeerSyncReplicationState(
+ final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
+ SyncReplicationState clusterState) throws IOException {
+ requirePermission(ctx, "transitSyncReplicationPeerState", Action.ADMIN);
+ }
+
+ @Override
public void preListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String regex) throws IOException {
requirePermission(ctx, "listReplicationPeers", Action.ADMIN);
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index e471100..a7710e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.TestReplicationEndpoint.InterClusterReplicationEndpointForTest;
import org.apache.hadoop.hbase.replication.regionserver.TestReplicator.ReplicationEndpointForTest;
import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -983,4 +984,65 @@ public class TestReplicationAdmin {
// OK
}
}
+
+ @Test
+ public void testTransitSyncReplicationPeerState() throws Exception {
+ ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
+ builder.setClusterKey(KEY_ONE);
+ hbaseAdmin.addReplicationPeer(ID_ONE, builder.build());
+ assertEquals(SyncReplicationState.NONE,
+ hbaseAdmin.getReplicationPeerSyncReplicationState(ID_ONE));
+
+ try {
+ hbaseAdmin.transitReplicationPeerSyncReplicationState(ID_ONE,
+ SyncReplicationState.DOWNGRADE_ACTIVE);
+ fail("Can't transit cluster state if replication peer don't config remote wal dir");
+ } catch (Exception e) {
+ // OK
+ }
+
+ String rootDir = "hdfs://srv1:9999/hbase";
+ builder = ReplicationPeerConfig.newBuilder();
+ builder.setClusterKey(KEY_SECOND);
+ builder.setRemoteWALDir(rootDir);
+ hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
+ assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE,
+ hbaseAdmin.getReplicationPeerSyncReplicationState(ID_SECOND));
+
+ // Disable and enable peer don't affect SyncReplicationState
+ hbaseAdmin.disableReplicationPeer(ID_SECOND);
+ assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE,
+ hbaseAdmin.getReplicationPeerSyncReplicationState(ID_SECOND));
+ hbaseAdmin.enableReplicationPeer(ID_SECOND);
+ assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE,
+ hbaseAdmin.getReplicationPeerSyncReplicationState(ID_SECOND));
+
+ hbaseAdmin.transitReplicationPeerSyncReplicationState(ID_SECOND, SyncReplicationState.ACTIVE);
+ assertEquals(SyncReplicationState.ACTIVE,
+ hbaseAdmin.getReplicationPeerSyncReplicationState(ID_SECOND));
+
+ try {
+ hbaseAdmin.transitReplicationPeerSyncReplicationState(ID_SECOND,
+ SyncReplicationState.STANDBY);
+ fail("Can't transit cluster state from ACTIVE to STANDBY");
+ } catch (Exception e) {
+ // OK
+ }
+
+ hbaseAdmin.transitReplicationPeerSyncReplicationState(ID_SECOND,
+ SyncReplicationState.DOWNGRADE_ACTIVE);
+ assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE,
+ hbaseAdmin.getReplicationPeerSyncReplicationState(ID_SECOND));
+
+ hbaseAdmin.transitReplicationPeerSyncReplicationState(ID_SECOND, SyncReplicationState.STANDBY);
+ assertEquals(SyncReplicationState.STANDBY,
+ hbaseAdmin.getReplicationPeerSyncReplicationState(ID_SECOND));
+
+ try {
+ hbaseAdmin.transitReplicationPeerSyncReplicationState(ID_SECOND, SyncReplicationState.ACTIVE);
+ fail("Can't transit cluster state from STANDBY to ACTIVE");
+ } catch (Exception e) {
+ // OK
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 48e9e8d..b7d54d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -54,11 +54,10 @@ import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import com.google.protobuf.Service;
-
public class MockNoopMasterServices implements MasterServices {
private final Configuration conf;
private final MetricsMaster metricsMaster;
@@ -481,4 +480,10 @@ public class MockNoopMasterServices implements MasterServices {
public ReplicationPeerManager getReplicationPeerManager() {
return null;
}
-}
+
+ @Override
+ public long transitReplicationPeerSyncReplicationState(String peerId,
+ SyncReplicationState clusterState) throws ReplicationException, IOException {
+ return 0;
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index 08dd428..24b930c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -109,7 +110,8 @@ public class TestReplicationHFileCleaner {
public void setup() throws ReplicationException, IOException {
root = TEST_UTIL.getDataTestDirOnTestFS();
rp.getPeerStorage().addPeer(peerId,
- ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL.getClusterKey()).build(), true);
+ ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL.getClusterKey()).build(), true,
+ SyncReplicationState.NONE);
rq.addPeerToHFileRefs(peerId);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 52b914e..69121f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -154,11 +154,13 @@ public class TestReplicationTrackerZKImpl {
public void testPeerNameControl() throws Exception {
int exists = 0;
rp.getPeerStorage().addPeer("6",
- ReplicationPeerConfig.newBuilder().setClusterKey(utility.getClusterKey()).build(), true);
+ ReplicationPeerConfig.newBuilder().setClusterKey(utility.getClusterKey()).build(), true,
+ SyncReplicationState.NONE);
try {
rp.getPeerStorage().addPeer("6",
- ReplicationPeerConfig.newBuilder().setClusterKey(utility.getClusterKey()).build(), true);
+ ReplicationPeerConfig.newBuilder().setClusterKey(utility.getClusterKey()).build(), true,
+ SyncReplicationState.NONE);
} catch (ReplicationException e) {
if (e.getCause() instanceof KeeperException.NodeExistsException) {
exists++;
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index a53cba3..db62b49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationSourceDummy;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.replication.ZKReplicationPeerStorage;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -584,7 +585,7 @@ public abstract class TestReplicationSourceManager {
private void addPeerAndWait(final String peerId, final ReplicationPeerConfig peerConfig,
final boolean waitForSource) throws Exception {
final ReplicationPeers rp = manager.getReplicationPeers();
- rp.getPeerStorage().addPeer(peerId, peerConfig, true);
+ rp.getPeerStorage().addPeer(peerId, peerConfig, true, SyncReplicationState.NONE);
try {
manager.addPeer(peerId);
} catch (Exception e) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/6fea1c97/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index ab9fb17..b2b5f1c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -117,6 +117,7 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.Permission.Action;
@@ -2938,6 +2939,21 @@ public class TestAccessController extends SecureTestUtil {
}
@Test
+ public void testTransitSyncReplicationPeerState() throws Exception {
+ AccessTestAction action = new AccessTestAction() {
+ @Override
+ public Object run() throws Exception {
+ ACCESS_CONTROLLER.preTransitReplicationPeerSyncReplicationState(
+ ObserverContextImpl.createAndPrepare(CP_ENV), "test", SyncReplicationState.NONE);
+ return null;
+ }
+ };
+
+ verifyAllowed(action, SUPERUSER, USER_ADMIN);
+ verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+ }
+
+ @Test
public void testListReplicationPeers() throws Exception {
AccessTestAction action = new AccessTestAction() {
@Override
[08/11] hbase git commit: HBASE-19935 Only allow table replication
for sync replication for now
Posted by zh...@apache.org.
HBASE-19935 Only allow table replication for sync replication for now
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/419cb015
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/419cb015
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/419cb015
Branch: refs/heads/HBASE-19064
Commit: 419cb015c7e27aef93da7010eec46793fae6017f
Parents: 2221b84
Author: Guanghao Zhang <zg...@apache.org>
Authored: Tue Feb 6 16:00:59 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Feb 10 17:49:13 2018 +0800
----------------------------------------------------------------------
.../replication/ReplicationPeerConfig.java | 9 +++
.../replication/ReplicationPeerManager.java | 34 ++++++++-
.../replication/TestReplicationAdmin.java | 73 ++++++++++++++------
.../wal/TestCombinedAsyncWriter.java | 6 ++
.../wal/TestSyncReplicationWALProvider.java | 6 ++
5 files changed, 102 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/419cb015/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 4c10c46..69565a7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -25,6 +25,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
+
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -217,6 +219,13 @@ public class ReplicationPeerConfig {
return this.remoteWALDir;
}
+ /**
+ * Use remote wal dir to decide whether a peer is sync replication peer
+ */
+ public boolean isSyncReplication() {
+ return !StringUtils.isBlank(this.remoteWALDir);
+ }
+
public static ReplicationPeerConfigBuilder newBuilder() {
return new ReplicationPeerConfigBuilderImpl();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/419cb015/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 9336fbd..6bfd9c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -167,7 +167,7 @@ public class ReplicationPeerManager {
" does not match new remote wal dir '" + peerConfig.getRemoteWALDir() + "'");
}
- if (oldPeerConfig.getRemoteWALDir() != null) {
+ if (oldPeerConfig.isSyncReplication()) {
if (!ReplicationUtils.isKeyConfigEqual(oldPeerConfig, peerConfig)) {
throw new DoNotRetryIOException(
"Changing the replicated namespace/table config on a synchronous replication "
@@ -195,8 +195,8 @@ public class ReplicationPeerManager {
}
ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build();
SyncReplicationState syncReplicationState =
- StringUtils.isBlank(peerConfig.getRemoteWALDir()) ? SyncReplicationState.NONE
- : SyncReplicationState.DOWNGRADE_ACTIVE;
+ copiedPeerConfig.isSyncReplication() ? SyncReplicationState.DOWNGRADE_ACTIVE
+ : SyncReplicationState.NONE;
peerStorage.addPeer(peerId, copiedPeerConfig, enabled, syncReplicationState);
peers.put(peerId,
new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig, syncReplicationState));
@@ -316,9 +316,37 @@ public class ReplicationPeerManager {
peerConfig.getTableCFsMap());
}
+ if (peerConfig.isSyncReplication()) {
+ checkPeerConfigForSyncReplication(peerConfig);
+ }
+
checkConfiguredWALEntryFilters(peerConfig);
}
+ private void checkPeerConfigForSyncReplication(ReplicationPeerConfig peerConfig)
+ throws DoNotRetryIOException {
+ // This is used to reduce the difficulty for implementing the sync replication state transition
+ // as we need to reopen all the related regions.
+ // TODO: Add namespace, replicat_all flag back
+ if (peerConfig.replicateAllUserTables()) {
+ throw new DoNotRetryIOException(
+ "Only support replicated table config for sync replication peer");
+ }
+ if (peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) {
+ throw new DoNotRetryIOException(
+ "Only support replicated table config for sync replication peer");
+ }
+ if (peerConfig.getTableCFsMap() == null || peerConfig.getTableCFsMap().isEmpty()) {
+ throw new DoNotRetryIOException("Need config replicated tables for sync replication peer");
+ }
+ for (List<String> cfs : peerConfig.getTableCFsMap().values()) {
+ if (cfs != null && !cfs.isEmpty()) {
+ throw new DoNotRetryIOException(
+ "Only support replicated table config for sync replication peer");
+ }
+ }
+ }
+
/**
* Set a namespace in the peer config means that all tables in this namespace will be replicated
* to the peer cluster.
http://git-wip-us.apache.org/repos/asf/hbase/blob/419cb015/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index a7710e7..d462dbd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -26,6 +26,7 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -910,6 +911,8 @@ public class TestReplicationAdmin {
@Test
public void testPeerRemoteWALDir() throws Exception {
+ TableName tableName = TableName.valueOf(name.getMethodName());
+
String rootDir = "hdfs://srv1:9999/hbase";
ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
builder.setClusterKey(KEY_ONE);
@@ -929,57 +932,74 @@ public class TestReplicationAdmin {
builder = ReplicationPeerConfig.newBuilder();
builder.setClusterKey(KEY_SECOND);
builder.setRemoteWALDir(rootDir);
- hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
- rpc = hbaseAdmin.getReplicationPeerConfig(ID_SECOND);
- assertEquals(rootDir, rpc.getRemoteWALDir());
+ try {
+ hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
+ fail("Only support replicated table config for sync replication");
+ } catch (Exception e) {
+ // OK
+ }
+ builder.setReplicateAllUserTables(false);
try {
- builder.setRemoteWALDir("hdfs://srv2:8888/hbase");
- hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
- fail("Change remote wal dir is not allowed");
+ Set<String> namespaces = new HashSet<String>();
+ namespaces.add("ns1");
+ builder.setNamespaces(namespaces);
+ hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
+ fail("Only support replicated table config for sync replication");
} catch (Exception e) {
// OK
}
+ builder.setNamespaces(null);
try {
- builder.setRemoteWALDir(null);
- hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
- fail("Change remote wal dir is not allowed");
+ hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
+ fail("Only support replicated table config for sync replication, and tables can't be empty");
} catch (Exception e) {
// OK
}
+ Map<TableName, List<String>> tableCfs = new HashMap<>();
try {
- builder = ReplicationPeerConfig.newBuilder(rpc);
- builder.setReplicateAllUserTables(false);
+ tableCfs.put(tableName, Arrays.asList("cf1"));
+ builder.setTableCFsMap(tableCfs);
+ hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
+ fail("Only support replicated table config for sync replication");
+ } catch (Exception e) {
+ // OK
+ }
+
+ tableCfs = new HashMap<>();
+ tableCfs.put(tableName, new ArrayList<>());
+ builder.setTableCFsMap(tableCfs);
+ hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
+ rpc = hbaseAdmin.getReplicationPeerConfig(ID_SECOND);
+ assertEquals(rootDir, rpc.getRemoteWALDir());
+
+ try {
+ builder.setRemoteWALDir("hdfs://srv2:8888/hbase");
hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
- fail(
- "Change replicated namespace/table config on an existing synchronous peer is not allowed");
+ fail("Change remote wal dir is not allowed");
} catch (Exception e) {
// OK
}
try {
- builder = ReplicationPeerConfig.newBuilder(rpc);
- Set<String> namespaces = new HashSet<>();
- namespaces.add("ns1");
- builder.setExcludeNamespaces(namespaces);
+ builder.setRemoteWALDir(null);
hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
- fail(
- "Change replicated namespace/table config on an existing synchronous peer is not allowed");
+ fail("Change remote wal dir is not allowed");
} catch (Exception e) {
// OK
}
try {
builder = ReplicationPeerConfig.newBuilder(rpc);
- Map<TableName, List<String>> tableCfs = new HashMap<>();
- tableCfs.put(TableName.valueOf(name.getMethodName()), new ArrayList<>());
- builder.setExcludeTableCFsMap(tableCfs);
+ tableCfs = new HashMap<>();
+ tableCfs.put(TableName.valueOf("ns1:" + name.getMethodName()), new ArrayList<>());
+ builder.setTableCFsMap(tableCfs);
hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, builder.build());
fail(
- "Change replicated namespace/table config on an existing synchronous peer is not allowed");
+ "Change replicated table config on an existing synchronous peer is not allowed");
} catch (Exception e) {
// OK
}
@@ -987,8 +1007,11 @@ public class TestReplicationAdmin {
@Test
public void testTransitSyncReplicationPeerState() throws Exception {
+ TableName tableName = TableName.valueOf(name.getMethodName());
+
ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
builder.setClusterKey(KEY_ONE);
+ builder.setReplicateAllUserTables(false);
hbaseAdmin.addReplicationPeer(ID_ONE, builder.build());
assertEquals(SyncReplicationState.NONE,
hbaseAdmin.getReplicationPeerSyncReplicationState(ID_ONE));
@@ -1005,6 +1028,10 @@ public class TestReplicationAdmin {
builder = ReplicationPeerConfig.newBuilder();
builder.setClusterKey(KEY_SECOND);
builder.setRemoteWALDir(rootDir);
+ builder.setReplicateAllUserTables(false);
+ Map<TableName, List<String>> tableCfs = new HashMap<>();
+ tableCfs.put(tableName, new ArrayList<>());
+ builder.setTableCFsMap(tableCfs);
hbaseAdmin.addReplicationPeer(ID_SECOND, builder.build());
assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE,
hbaseAdmin.getReplicationPeerSyncReplicationState(ID_SECOND));
http://git-wip-us.apache.org/repos/asf/hbase/blob/419cb015/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
index 36dbe0f..07aa6a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
@@ -23,6 +23,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.wal.AsyncFSWALProvider.AsyncWriter;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
+import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -51,6 +53,10 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
@Category({ RegionServerTests.class, MediumTests.class })
public class TestCombinedAsyncWriter {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestCombinedAsyncWriter.class);
+
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static EventLoopGroup EVENT_LOOP_GROUP;
http://git-wip-us.apache.org/repos/asf/hbase/blob/419cb015/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
index 60a9e13..f09e51e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertThat;
import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
@@ -41,12 +42,17 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.AfterClass;
import org.junit.BeforeClass;
+import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({ RegionServerTests.class, MediumTests.class })
public class TestSyncReplicationWALProvider {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSyncReplicationWALProvider.class);
+
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static String PEER_ID = "1";