Posted to commits@hbase.apache.org by bu...@apache.org on 2017/11/07 22:57:13 UTC
[01/15] hbase git commit: HBASE-19185 ClassNotFoundException:
com.fasterxml.jackson.* [Forced Update!]
Repository: hbase
Updated Branches:
refs/heads/HBASE-19189 1b1ba46fb -> 5339d25b4 (forced update)
HBASE-19185 ClassNotFoundException: com.fasterxml.jackson.*
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c463e9c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c463e9c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c463e9c8
Branch: refs/heads/HBASE-19189
Commit: c463e9c8403645597141b18cb9d502623fa7f104
Parents: 28cdf4a
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Sun Nov 5 23:30:28 2017 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Sun Nov 5 23:30:28 2017 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/c463e9c8/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index cf86184..40e2cb9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -820,7 +820,10 @@ public class TableMapReduceUtil {
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists.class,
org.apache.htrace.Trace.class,
com.codahale.metrics.MetricRegistry.class,
- org.apache.commons.lang3.ArrayUtils.class);
+ org.apache.commons.lang3.ArrayUtils.class,
+ com.fasterxml.jackson.databind.ObjectMapper.class,
+ com.fasterxml.jackson.core.Versioned.class,
+ com.fasterxml.jackson.annotation.JsonView.class);
}
/**
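For context on the fix above: TableMapReduceUtil.addDependencyJars(Configuration, Class...) resolves the jar that provides each listed class and appends it to the job's tmpjars, so naming one marker class per Jackson artifact ships jackson-databind, jackson-core, and jackson-annotations to the task JVMs. A minimal driver sketch using that method (the class and job names here are illustrative, not from the commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ShipJacksonJars {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "hbase-mr-example"); // illustrative job name
    // Resolves the jar backing each class and adds it to the job's "tmpjars",
    // so the task JVMs can load com.fasterxml.jackson.* at runtime instead of
    // failing with ClassNotFoundException.
    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
        com.fasterxml.jackson.databind.ObjectMapper.class,
        com.fasterxml.jackson.core.Versioned.class,
        com.fasterxml.jackson.annotation.JsonView.class);
  }
}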
[13/15] hbase git commit: HBASE-19174 Updated link to presentations
to link to book
Posted by bu...@apache.org.
HBASE-19174 Updated link to presentations to link to book
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/29fd1dea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/29fd1dea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/29fd1dea
Branch: refs/heads/HBASE-19189
Commit: 29fd1dead227a6e72d29e5b5fc990a08a7c4bb05
Parents: 9d63bda
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sat Nov 4 00:04:00 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Nov 7 08:29:38 2017 +0100
----------------------------------------------------------------------
src/site/asciidoc/old_news.adoc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/29fd1dea/src/site/asciidoc/old_news.adoc
----------------------------------------------------------------------
diff --git a/src/site/asciidoc/old_news.adoc b/src/site/asciidoc/old_news.adoc
index c5cf993..4ae3d7a 100644
--- a/src/site/asciidoc/old_news.adoc
+++ b/src/site/asciidoc/old_news.adoc
@@ -113,7 +113,7 @@ October 2nd, 2009:: HBase at Hadoop World in NYC. A few of us will be talking on
August 7th-9th, 2009:: HUG7 and HBase Hackathon at StumbleUpon in SF: Sign up for the:: link:http://www.meetup.com/hbaseusergroup/calendar/10950511/[HBase User Group Meeting, HUG7] or for the link:http://www.meetup.com/hackathon/calendar/10951718/[Hackathon] or for both (all are welcome!).
-June, 2009:: HBase at HadoopSummit2009 and at NOSQL: See the link:https://wiki.apache.org/hadoop/HBase/HBasePresentations[presentations]
+June, 2009:: HBase at HadoopSummit2009 and at NOSQL: See the link:https://hbase.apache.org/book.html#other.info.pres[presentations]
March 3rd, 2009 :: HUG6 -- link:http://www.meetup.com/hbaseusergroup/calendar/9764004/[HBase User Group 6]
[11/15] hbase git commit: HBASE-19183 Removed redundant groupId from
hbase-checkstyle and hbase-error-prone
Posted by bu...@apache.org.
HBASE-19183 Removed redundant groupId from hbase-checkstyle and hbase-error-prone
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4e3f902
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4e3f902
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4e3f902
Branch: refs/heads/HBASE-19189
Commit: d4e3f902e6ba5b747295ca6053f34badd4018175
Parents: 0356674
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sat Nov 4 23:01:40 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Nov 7 08:20:51 2017 +0100
----------------------------------------------------------------------
hbase-build-support/hbase-error-prone/pom.xml | 1 -
hbase-checkstyle/pom.xml | 1 -
2 files changed, 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4e3f902/hbase-build-support/hbase-error-prone/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-build-support/hbase-error-prone/pom.xml b/hbase-build-support/hbase-error-prone/pom.xml
index 907d82d..067e154 100644
--- a/hbase-build-support/hbase-error-prone/pom.xml
+++ b/hbase-build-support/hbase-error-prone/pom.xml
@@ -26,7 +26,6 @@
<version>3.0.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
- <groupId>org.apache.hbase</groupId>
<artifactId>hbase-error-prone</artifactId>
<version>3.0.0-SNAPSHOT</version>
<name>Apache HBase - Error Prone Rules</name>
http://git-wip-us.apache.org/repos/asf/hbase/blob/d4e3f902/hbase-checkstyle/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml
index ed84b20..2b30c12 100644
--- a/hbase-checkstyle/pom.xml
+++ b/hbase-checkstyle/pom.xml
@@ -22,7 +22,6 @@
*/
-->
<modelVersion>4.0.0</modelVersion>
-<groupId>org.apache.hbase</groupId>
<artifactId>hbase-checkstyle</artifactId>
<version>3.0.0-SNAPSHOT</version>
<name>Apache HBase - Checkstyle</name>
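Both removals are safe because Maven inherits groupId from the <parent> element when a child POM omits it; only artifactId must be declared in the module itself. A hedged sketch of the resulting module POM shape (the parent artifactId shown is assumed for illustration; other values mirror the diff above):

<project>
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-build-support</artifactId> <!-- assumed parent artifactId -->
    <version>3.0.0-SNAPSHOT</version>
    <relativePath>..</relativePath>
  </parent>
  <!-- groupId is inherited from the parent and no longer repeated here -->
  <artifactId>hbase-error-prone</artifactId>
  <version>3.0.0-SNAPSHOT</version>
  <name>Apache HBase - Error Prone Rules</name>
</project>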
[03/15] hbase git commit: HBASE-18950 Remove Optional parameters in
AsyncAdmin interface
Posted by bu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java
index c3c4045..83ba244 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java
@@ -21,10 +21,8 @@ import static org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER
import java.util.Arrays;
import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.ExecutionException;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import java.util.regex.Pattern;
@@ -41,8 +39,6 @@ import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TestName;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
@@ -106,7 +102,7 @@ public abstract class TestAsyncAdminBase {
@After
public void tearDown() throws Exception {
- admin.listTableNames(Optional.of(Pattern.compile(tableName.getNameAsString() + ".*")), false)
+ admin.listTableNames(Pattern.compile(tableName.getNameAsString() + ".*"), false)
.whenCompleteAsync((tables, err) -> {
if (tables != null) {
tables.forEach(table -> {
@@ -122,19 +118,21 @@ public abstract class TestAsyncAdminBase {
}
protected void createTableWithDefaultConf(TableName tableName) {
- createTableWithDefaultConf(tableName, Optional.empty());
+ createTableWithDefaultConf(tableName, null);
}
- protected void createTableWithDefaultConf(TableName tableName, Optional<byte[][]> splitKeys) {
+ protected void createTableWithDefaultConf(TableName tableName, byte[][] splitKeys) {
createTableWithDefaultConf(tableName, splitKeys, FAMILY);
}
- protected void createTableWithDefaultConf(TableName tableName, Optional<byte[][]> splitKeys,
+ protected void createTableWithDefaultConf(TableName tableName, byte[][] splitKeys,
byte[]... families) {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
for (byte[] family : families) {
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
}
- admin.createTable(builder.build(), splitKeys).join();
+ CompletableFuture<Void> future = splitKeys == null ? admin.createTable(builder.build())
+ : admin.createTable(builder.build(), splitKeys);
+ future.join();
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
index 53de2b5..e7c439b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
@@ -31,7 +31,6 @@ import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
-import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
@@ -254,7 +253,7 @@ public class TestAsyncClusterAdminApi extends TestAsyncAdminBase {
List<RegionInfo> tableRegions = admin.getTableRegions(table).get();
List<RegionLoad> regionLoads = Lists.newArrayList();
for (ServerName serverName : servers) {
- regionLoads.addAll(admin.getRegionLoads(serverName, Optional.of(table)).get());
+ regionLoads.addAll(admin.getRegionLoads(serverName, table).get());
}
checkRegionsAndRegionLoads(tableRegions, regionLoads);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
index 262cac6..1ee1b94 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -200,7 +200,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
}
assertTrue(destServerName != null && !destServerName.equals(serverName));
- admin.move(hri.getRegionName(), Optional.of(destServerName)).get();
+ admin.move(hri.getRegionName(), destServerName).get();
long timeoutTime = System.currentTimeMillis() + 30000;
while (true) {
@@ -362,7 +362,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
@Test
public void testMergeRegions() throws Exception {
byte[][] splitRows = new byte[][] { Bytes.toBytes("3"), Bytes.toBytes("6") };
- createTableWithDefaultConf(tableName, Optional.of(splitRows));
+ createTableWithDefaultConf(tableName, splitRows);
RawAsyncTable metaTable = ASYNC_CONN.getRawTable(META_TABLE_NAME);
List<HRegionLocation> regionLocations =
@@ -419,8 +419,11 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
table.putAll(puts).join();
if (isSplitRegion) {
- admin.splitRegion(regionLocations.get(0).getRegionInfo().getRegionName(),
- Optional.ofNullable(splitPoint)).get();
+ if (splitPoint == null) {
+ admin.splitRegion(regionLocations.get(0).getRegionInfo().getRegionName()).get();
+ } else {
+ admin.splitRegion(regionLocations.get(0).getRegionInfo().getRegionName(), splitPoint).get();
+ }
} else {
if (splitPoint == null) {
admin.split(tableName).get();
@@ -450,7 +453,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
@Test
public void testCompactRegionServer() throws Exception {
byte[][] families = { Bytes.toBytes("f1"), Bytes.toBytes("f2"), Bytes.toBytes("f3") };
- createTableWithDefaultConf(tableName, Optional.empty(), families);
+ createTableWithDefaultConf(tableName, null, families);
loadData(tableName, families, 3000, 8);
List<HRegionServer> rsList =
@@ -491,7 +494,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
byte[] family = Bytes.toBytes("family");
byte[][] families =
{ family, Bytes.add(family, Bytes.toBytes("2")), Bytes.add(family, Bytes.toBytes("3")) };
- createTableWithDefaultConf(tableName, Optional.empty(), families);
+ createTableWithDefaultConf(tableName, null, families);
loadData(tableName, families, 3000, flushes);
List<Region> regions = new ArrayList<>();
@@ -506,15 +509,15 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
assertTrue(countBefore > 0); // there should be some data files
if (expectedState == CompactionState.MINOR) {
if (singleFamily) {
- admin.compact(tableName, Optional.of(family)).get();
+ admin.compact(tableName, family).get();
} else {
- admin.compact(tableName, Optional.empty()).get();
+ admin.compact(tableName).get();
}
} else {
if (singleFamily) {
- admin.majorCompact(tableName, Optional.of(family)).get();
+ admin.majorCompact(tableName, family).get();
} else {
- admin.majorCompact(tableName, Optional.empty()).get();
+ admin.majorCompact(tableName).get();
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java
index f789da5..5014e96 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java
@@ -32,7 +32,6 @@ import org.junit.runners.Parameterized;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
-import java.util.Optional;
import java.util.regex.Pattern;
@RunWith(Parameterized.class)
@@ -169,13 +168,10 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase {
admin.snapshot(snapshotName3, tableName).get();
Assert.assertEquals(admin.listSnapshots().get().size(), 3);
- Assert.assertEquals(admin.listSnapshots(Optional.of(Pattern.compile("(.*)"))).get().size(), 3);
- Assert.assertEquals(admin.listSnapshots(Optional.of(Pattern.compile("snapshotName(\\d+)")))
- .get().size(), 3);
- Assert.assertEquals(admin.listSnapshots(Optional.of(Pattern.compile("snapshotName[1|3]")))
- .get().size(), 2);
- Assert.assertEquals(admin.listSnapshots(Optional.of(Pattern.compile("snapshot(.*)"))).get()
- .size(), 3);
+ Assert.assertEquals(admin.listSnapshots(Pattern.compile("(.*)")).get().size(), 3);
+ Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshotName(\\d+)")).get().size(), 3);
+ Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshotName[1|3]")).get().size(), 2);
+ Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshot(.*)")).get().size(), 3);
Assert.assertEquals(
admin.listTableSnapshots(Pattern.compile("testListSnapshots"), Pattern.compile("s(.*)")).get()
.size(),
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 4df5947..7bbbd71 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -120,9 +120,9 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
admin.deleteTable(tables[i]).join();
}
- tableDescs = admin.listTables(Optional.empty(), true).get();
+ tableDescs = admin.listTables(true).get();
assertTrue("Not found system tables", tableDescs.size() > 0);
- tableNames = admin.listTableNames(Optional.empty(), true).get();
+ tableNames = admin.listTableNames(true).get();
assertTrue("Not found system tables", tableNames.size() > 0);
}
@@ -169,7 +169,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
assertEquals("Table should have only 1 region", 1, regionLocations.size());
final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "_2");
- createTableWithDefaultConf(tableName2, Optional.of(new byte[][] { new byte[] { 42 } }));
+ createTableWithDefaultConf(tableName2, new byte[][] { new byte[] { 42 } });
regionLocations =
AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, Optional.of(tableName2)).get();
assertEquals("Table should have only 2 region", 2, regionLocations.size());
@@ -208,7 +208,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 }, };
int expectedRegions = splitKeys.length + 1;
boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
- createTableWithDefaultConf(tableName, Optional.of(splitKeys));
+ createTableWithDefaultConf(tableName, splitKeys);
boolean tableAvailable = admin.isTableAvailable(tableName, splitKeys).get();
assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable);
@@ -342,7 +342,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
new byte[] { 3, 3, 3 }, new byte[] { 2, 2, 2 } };
final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "_4");
try {
- createTableWithDefaultConf(tableName4, Optional.of(splitKeys));
+ createTableWithDefaultConf(tableName4, splitKeys);
fail("Should not be able to create this table because of " + "duplicate split keys");
} catch (CompletionException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -376,7 +376,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
byte[][] splitKeys = new byte[1][];
splitKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
try {
- createTableWithDefaultConf(tableName, Optional.of(splitKeys));
+ createTableWithDefaultConf(tableName, splitKeys);
fail("Test case should fail as empty split key is passed.");
} catch (CompletionException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -390,7 +390,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY;
splitKeys[2] = "region2".getBytes();
try {
- createTableWithDefaultConf(tableName, Optional.of(splitKeys));
+ createTableWithDefaultConf(tableName, splitKeys);
fail("Test case should fail as empty split key is passed.");
} catch (CompletionException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -423,7 +423,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
splitKeys[1] = Bytes.toBytes(8);
// Create & Fill the table
- createTableWithDefaultConf(tableName, Optional.of(splitKeys));
+ createTableWithDefaultConf(tableName, splitKeys);
RawAsyncTable table = ASYNC_CONN.getRawTable(tableName);
int expectedRows = 10;
for (int i = 0; i < expectedRows; i++) {
@@ -517,7 +517,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
table1.get(get).get();
table2.get(get).get();
- admin.listTableNames(Optional.of(Pattern.compile(tableName.getNameAsString() + ".*")), false)
+ admin.listTableNames(Pattern.compile(tableName.getNameAsString() + ".*"), false)
.get().forEach(t -> admin.disableTable(t).join());
// Test that tables are disabled
@@ -541,7 +541,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
assertEquals(TableState.State.DISABLED, getStateFromMeta(tableName1));
assertEquals(TableState.State.DISABLED, getStateFromMeta(tableName2));
- admin.listTableNames(Optional.of(Pattern.compile(tableName.getNameAsString() + ".*")), false)
+ admin.listTableNames(Pattern.compile(tableName.getNameAsString() + ".*"), false)
.get().forEach(t -> admin.enableTable(t).join());
// Test that tables are enabled
@@ -567,7 +567,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 },
new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } };
int expectedRegions = splitKeys.length + 1;
- createTableWithDefaultConf(tableName, Optional.of(splitKeys));
+ createTableWithDefaultConf(tableName, splitKeys);
RawAsyncTable metaTable = ASYNC_CONN.getRawTable(META_TABLE_NAME);
List<HRegionLocation> regions =
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 60b0260..d24711a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -1543,7 +1543,7 @@ public class TestMasterObserver {
assertTrue("Found server", found);
LOG.info("Found " + destName);
master.getMasterRpcServices().moveRegion(null, RequestConverter.buildMoveRegionRequest(
- firstGoodPair.getRegionInfo().getEncodedNameAsBytes(),Bytes.toBytes(destName)));
+ firstGoodPair.getRegionInfo().getEncodedNameAsBytes(), ServerName.valueOf(destName)));
assertTrue("Coprocessor should have been called on region move",
cp.wasMoveCalled());
@@ -1565,11 +1565,12 @@ public class TestMasterObserver {
UTIL.waitUntilNoRegionsInTransition();
List<RegionInfo> openRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
int moveCnt = openRegions.size()/2;
- for (int i=0; i<moveCnt; i++) {
+ for (int i = 0; i < moveCnt; i++) {
RegionInfo info = openRegions.get(i);
if (!info.isMetaRegion()) {
- master.getMasterRpcServices().moveRegion(null, RequestConverter.buildMoveRegionRequest(
- openRegions.get(i).getEncodedNameAsBytes(), destRS));
+ master.getMasterRpcServices().moveRegion(null,
+ RequestConverter.buildMoveRegionRequest(openRegions.get(i).getEncodedNameAsBytes(),
+ ServerName.valueOf(Bytes.toString(destRS))));
}
}
//Make sure no regions are in transition now
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index 53d9741..723b570 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -242,7 +242,7 @@ public class TestHRegionServerBulkLoad {
AdminProtos.AdminService.BlockingInterface server =
conn.getAdmin(getLocation().getServerName());
CompactRegionRequest request = RequestConverter.buildCompactRegionRequest(
- getLocation().getRegionInfo().getRegionName(), true, Optional.empty());
+ getLocation().getRegionInfo().getRegionName(), true, null);
server.compactRegion(null, request);
numCompactions.incrementAndGet();
return null;
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java
index 7f486e4..da4b740 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
@@ -124,7 +123,7 @@ public class TestHRegionServerBulkLoadWithOldClient extends TestHRegionServerBul
conn.getAdmin(getLocation().getServerName());
CompactRegionRequest request =
RequestConverter.buildCompactRegionRequest(
- getLocation().getRegionInfo().getRegionName(), true, Optional.empty());
+ getLocation().getRegionInfo().getRegionName(), true, null);
server.compactRegion(null, request);
numCompactions.incrementAndGet();
return null;
[07/15] hbase git commit: HBASE-19111 Add CellUtil#isPut and
deprecate methods returning/expecting non public-api data
Posted by bu...@apache.org.
HBASE-19111 Add CellUtil#isPut and deprecate methods returning/expecting non public-api data
KeyValue.Type, and its corresponding byte value, are not public API. We
shouldn't have methods that are expecting them. Added a basic sanity
test for isPut and isDelete.
Signed-off-by: Ramkrishna <ra...@intel.com>
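As a sketch of the migration this change enables (cell is any Cell read from a Result or scanner; the first form is the pattern being deprecated because KeyValue.Type and its byte codes are not public API):

// Before: compares against the non-public KeyValue.Type byte codes.
if (cell.getTypeByte() == KeyValue.Type.Put.getCode()) {
  // handle a Put cell
}

// After: public-API checks via CellUtil.
if (CellUtil.isPut(cell)) {
  // handle a Put cell
} else if (CellUtil.isDelete(cell)) {
  // handle any Delete variant (Delete, DeleteColumn, DeleteFamily, ...)
}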
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2a99b87a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2a99b87a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2a99b87a
Branch: refs/heads/HBASE-19189
Commit: 2a99b87af2ebe289e2fec94c9cdca0942397977d
Parents: 33ede55
Author: Josh Elser <el...@apache.org>
Authored: Fri Oct 27 19:27:59 2017 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Mon Nov 6 15:37:12 2017 -0500
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/hbase/Cell.java | 3 +
.../java/org/apache/hadoop/hbase/CellUtil.java | 9 +++
.../hadoop/hbase/client/TestFromClientSide.java | 73 +++++++++++++++-----
3 files changed, 66 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/2a99b87a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
index b2f6304..f5833c8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
@@ -133,7 +133,10 @@ public interface Cell {
/**
* @return The byte representation of the KeyValue.TYPE of this cell: one of Put, Delete, etc
+ * @deprecated since 2.0.0, use appropriate {@link CellUtil#isDelete} or
+ * {@link CellUtil#isPut(Cell)} methods instead. This will be removed in 3.0.0.
*/
+ @Deprecated
byte getTypeByte();
http://git-wip-us.apache.org/repos/asf/hbase/blob/2a99b87a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 78f12b5..52eb8fa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -893,6 +893,7 @@ public final class CellUtil {
* {@link KeyValue.Type#DeleteFamily} or a
* {@link KeyValue.Type#DeleteColumn} KeyValue type.
*/
+ @SuppressWarnings("deprecation")
public static boolean isDelete(final Cell cell) {
return PrivateCellUtil.isDelete(cell.getTypeByte());
}
@@ -962,6 +963,14 @@ public final class CellUtil {
}
/**
+ * @return True if this cell is a Put.
+ */
+ @SuppressWarnings("deprecation")
+ public static boolean isPut(Cell cell) {
+ return cell.getTypeByte() == Type.Put.getCode();
+ }
+
+ /**
* Estimate based on keyvalue's serialization format in the RPC layer. Note that there is an extra
* SIZEOF_INT added to the size here that indicates the actual length of the cell for cases where
* cell's are serialized in a contiguous format (For eg in RPCs).
http://git-wip-us.apache.org/repos/asf/hbase/blob/2a99b87a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 804f821..02d3797 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -50,6 +50,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.CompareOperator;
@@ -132,9 +133,6 @@ public class TestFromClientSide {
@Rule
public TestName name = new TestName();
- /**
- * @throws java.lang.Exception
- */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Uncomment the following lines if more verbosity is needed for
@@ -151,9 +149,6 @@ public class TestFromClientSide {
TEST_UTIL.startMiniCluster(SLAVES);
}
- /**
- * @throws java.lang.Exception
- */
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
@@ -342,8 +337,6 @@ public class TestFromClientSide {
/**
* Test from client side of an involved filter against a multi family that
* involves deletes.
- *
- * @throws Exception
*/
@Test
public void testWeirdCacheBehaviour() throws Exception {
@@ -468,8 +461,6 @@ public class TestFromClientSide {
* Test filters when multiple regions. It does counts. Needs eye-balling of
* logs to ensure that we're not scanning more regions that we're supposed to.
* Related to the TestFilterAcrossRegions over in the o.a.h.h.filter package.
- * @throws IOException
- * @throws InterruptedException
*/
@Test
public void testFilterAcrossMultipleRegions()
@@ -4106,7 +4097,6 @@ public class TestFromClientSide {
/**
* test for HBASE-737
- * @throws IOException
*/
@Test
public void testHBase737 () throws IOException {
@@ -4229,8 +4219,6 @@ public class TestFromClientSide {
/**
* simple test that just executes parts of the client
* API that accept a pre-created Connection instance
- *
- * @throws IOException
*/
@Test
public void testUnmanagedHConnection() throws IOException {
@@ -4247,8 +4235,6 @@ public class TestFromClientSide {
/**
* test of that unmanaged HConnections are able to reconnect
* properly (see HBASE-5058)
- *
- * @throws Exception
*/
@Test
public void testUnmanagedHConnectionReconnect() throws Exception {
@@ -4467,7 +4453,6 @@ public class TestFromClientSide {
/**
* For HBASE-2156
- * @throws Exception
*/
@Test
public void testScanVariableReuse() throws Exception {
@@ -4993,7 +4978,6 @@ public class TestFromClientSide {
/**
* Test ScanMetrics
- * @throws Exception
*/
@Test
@SuppressWarnings ("unused")
@@ -5131,8 +5115,6 @@ public class TestFromClientSide {
*
* Performs inserts, flushes, and compactions, verifying changes in the block
* cache along the way.
- *
- * @throws Exception
*/
@Test
public void testCacheOnWriteEvictOnClose() throws Exception {
@@ -6562,4 +6544,57 @@ public class TestFromClientSide {
table.close();
admin.close();
}
+
+ @Test
+ public void testCellUtilTypeMethods() throws IOException {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ Table table = TEST_UTIL.createTable(tableName, FAMILY);
+
+ final byte[] row = Bytes.toBytes("p");
+ Put p = new Put(row);
+ p.addColumn(FAMILY, QUALIFIER, VALUE);
+ table.put(p);
+
+ try (ResultScanner scanner = table.getScanner(new Scan())) {
+ Result result = scanner.next();
+ assertNotNull(result);
+ CellScanner cs = result.cellScanner();
+ assertTrue(cs.advance());
+ Cell c = cs.current();
+ assertTrue(CellUtil.isPut(c));
+ assertFalse(CellUtil.isDelete(c));
+ assertFalse(cs.advance());
+ assertNull(scanner.next());
+ }
+
+ Delete d = new Delete(row);
+ d.addColumn(FAMILY, QUALIFIER);
+ table.delete(d);
+
+ Scan scan = new Scan();
+ scan.setRaw(true);
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ Result result = scanner.next();
+ assertNotNull(result);
+ CellScanner cs = result.cellScanner();
+ assertTrue(cs.advance());
+
+ // First cell should be the delete (masking the Put)
+ Cell c = cs.current();
+ assertTrue("Cell should be a Delete: " + c, CellUtil.isDelete(c));
+ assertFalse("Cell should not be a Put: " + c, CellUtil.isPut(c));
+
+ // Second cell should be the original Put
+ assertTrue(cs.advance());
+ c = cs.current();
+ assertFalse("Cell should not be a Delete: " + c, CellUtil.isDelete(c));
+ assertTrue("Cell should be a Put: " + c, CellUtil.isPut(c));
+
+ // No more cells in this row
+ assertFalse(cs.advance());
+
+ // No more results in this scan
+ assertNull(scanner.next());
+ }
+ }
}
[09/15] hbase git commit: HBASE-19198 TestIPv6NIOServerSocketChannel
fails; unable to bind
Posted by bu...@apache.org.
HBASE-19198 TestIPv6NIOServerSocketChannel fails; unable to bind
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d1b6d8c9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d1b6d8c9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d1b6d8c9
Branch: refs/heads/HBASE-19189
Commit: d1b6d8c90692d2ccf9a9e5c9c6186d62a0b2b553
Parents: b6011a1
Author: Michael Stack <st...@duboce.net>
Authored: Mon Nov 6 21:19:51 2017 -0800
Committer: Michael Stack <st...@duboce.net>
Committed: Mon Nov 6 21:20:04 2017 -0800
----------------------------------------------------------------------
.../apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b6d8c9/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java
index d4f4ada..e63eaf2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java
@@ -49,7 +49,6 @@ import org.junit.rules.TestRule;
*/
@Category({MiscTests.class, SmallTests.class})
public class TestIPv6NIOServerSocketChannel {
-
private static final Log LOG = LogFactory.getLog(TestIPv6NIOServerSocketChannel.class);
@Rule
@@ -69,6 +68,7 @@ public class TestIPv6NIOServerSocketChannel {
break;
} catch (BindException ex) {
//continue
+ LOG.info("Failed on " + addr + ", inedAddr=" + inetAddr, ex);
} finally {
if (serverSocket != null) {
serverSocket.close();
@@ -151,9 +151,9 @@ public class TestIPv6NIOServerSocketChannel {
*/
@Test
public void testServerSocketFromLocalhostResolution() throws IOException {
- InetAddress[] addrs = InetAddress.getAllByName("localhost");
+ InetAddress[] addrs = {InetAddress.getLocalHost()};
for (InetAddress addr : addrs) {
- LOG.info("resolved localhost as:" + addr);
+ LOG.info("Resolved localhost as: " + addr);
bindServerSocket(addr);
bindNIOServerSocket(addr);
}
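The substitution matters because InetAddress.getAllByName("localhost") can return both loopback addresses (127.0.0.1 and ::1), and it was the IPv6 ::1 entry that failed to bind on some build hosts, whereas InetAddress.getLocalHost() yields the single address the host name resolves to. A small probe illustrating the difference (output depends on the machine's resolver configuration):

import java.net.InetAddress;

public class LocalhostProbe {
  public static void main(String[] args) throws Exception {
    // May print several entries, e.g. localhost/127.0.0.1 and localhost/0:0:0:0:0:0:0:1.
    for (InetAddress addr : InetAddress.getAllByName("localhost")) {
      System.out.println("getAllByName: " + addr);
    }
    // Prints exactly one entry: the address the local host name resolves to.
    System.out.println("getLocalHost:  " + InetAddress.getLocalHost());
  }
}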
[04/15] hbase git commit: HBASE-18950 Remove Optional parameters in
AsyncAdmin interface
Posted by bu...@apache.org.
HBASE-18950 Remove Optional parameters in AsyncAdmin interface
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/888f2335
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/888f2335
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/888f2335
Branch: refs/heads/HBASE-19189
Commit: 888f2335c952040646ce820f6191f6433ec9411d
Parents: bc3f3ee
Author: Guanghao Zhang <zg...@apache.org>
Authored: Mon Oct 23 11:22:00 2017 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Mon Nov 6 20:30:59 2017 +0800
----------------------------------------------------------------------
.../apache/hadoop/hbase/client/AsyncAdmin.java | 132 ++---
.../hadoop/hbase/client/AsyncHBaseAdmin.java | 114 ++++-
.../client/AsyncRpcRetryingCallerFactory.java | 8 +-
.../apache/hadoop/hbase/client/HBaseAdmin.java | 6 +-
.../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 477 +++++++++++++------
.../hbase/shaded/protobuf/ProtobufUtil.java | 11 +-
.../hbase/shaded/protobuf/RequestConverter.java | 403 ++++++----------
...gionServerBulkLoadWithOldSecureEndpoint.java | 3 +-
.../hadoop/hbase/client/TestAsyncAdminBase.java | 18 +-
.../hbase/client/TestAsyncClusterAdminApi.java | 3 +-
.../hbase/client/TestAsyncRegionAdminApi.java | 23 +-
.../hbase/client/TestAsyncSnapshotAdminApi.java | 12 +-
.../hbase/client/TestAsyncTableAdminApi.java | 22 +-
.../hbase/coprocessor/TestMasterObserver.java | 9 +-
.../regionserver/TestHRegionServerBulkLoad.java | 2 +-
.../TestHRegionServerBulkLoadWithOldClient.java | 3 +-
16 files changed, 713 insertions(+), 533 deletions(-)
----------------------------------------------------------------------
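The caller-facing effect of the change, as a sketch (tableName, family, regionName, and destServerName are placeholders; the first group uses the old Optional-typed signatures removed by this commit):

// Before HBASE-18950: absence expressed with java.util.Optional.
admin.compact(tableName, Optional.empty()).get();
admin.compact(tableName, Optional.of(family)).get();
admin.move(regionName, Optional.of(destServerName)).get();

// After: plain overloads; absence is expressed by calling the shorter overload.
admin.compact(tableName).get();
admin.compact(tableName, family).get();
admin.move(regionName, destServerName).get();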
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 8fe02b9..baae6cf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -64,38 +64,49 @@ public interface AsyncAdmin {
/**
* List all the userspace tables.
* @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
- * @see #listTables(Optional, boolean)
*/
default CompletableFuture<List<TableDescriptor>> listTables() {
- return listTables(Optional.empty(), false);
+ return listTables(false);
}
/**
+ * List all the tables.
+ * @param includeSysTables False to match only against userspace tables
+ * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
+ */
+ CompletableFuture<List<TableDescriptor>> listTables(boolean includeSysTables);
+
+ /**
* List all the tables matching the given pattern.
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
*/
- CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
- boolean includeSysTables);
+ CompletableFuture<List<TableDescriptor>> listTables(Pattern pattern, boolean includeSysTables);
/**
* List all of the names of userspace tables.
* @return a list of table names wrapped by a {@link CompletableFuture}.
- * @see #listTableNames(Optional, boolean)
+ * @see #listTableNames(Pattern, boolean)
*/
default CompletableFuture<List<TableName>> listTableNames() {
- return listTableNames(Optional.empty(), false);
+ return listTableNames(false);
}
/**
+ * List all of the names of tables.
+ * @param includeSysTables False to match only against userspace tables
+ * @return a list of table names wrapped by a {@link CompletableFuture}.
+ */
+ CompletableFuture<List<TableName>> listTableNames(boolean includeSysTables);
+
+ /**
* List all of the names of userspace tables.
* @param pattern The regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return a list of table names wrapped by a {@link CompletableFuture}.
*/
- CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
- boolean includeSysTables);
+ CompletableFuture<List<TableName>> listTableNames(Pattern pattern, boolean includeSysTables);
/**
* Method for getting the tableDescriptor
@@ -108,9 +119,7 @@ public interface AsyncAdmin {
* Creates a new table.
* @param desc table descriptor for table
*/
- default CompletableFuture<Void> createTable(TableDescriptor desc) {
- return createTable(desc, Optional.empty());
- }
+ CompletableFuture<Void> createTable(TableDescriptor desc);
/**
* Creates a new table with the specified number of regions. The start key specified will become
@@ -133,7 +142,7 @@ public interface AsyncAdmin {
* @param desc table descriptor for table
* @param splitKeys array of split keys for the initial regions of the table
*/
- CompletableFuture<Void> createTable(TableDescriptor desc, Optional<byte[][]> splitKeys);
+ CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys);
/**
* Deletes a table.
@@ -179,9 +188,7 @@ public interface AsyncAdmin {
* @return true if all regions of the table are available. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
- default CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
- return isTableAvailable(tableName, null);
- }
+ CompletableFuture<Boolean> isTableAvailable(TableName tableName);
/**
* Use this api to check if the table has been created with the specified number of splitkeys
@@ -274,9 +281,7 @@ public interface AsyncAdmin {
* was sent to HBase and may need some time to finish the compact operation.
* @param tableName table to compact
*/
- default CompletableFuture<Void> compact(TableName tableName) {
- return compact(tableName, Optional.empty());
- }
+ CompletableFuture<Void> compact(TableName tableName);
/**
* Compact a column family within a table. When the returned CompletableFuture is done, it only
@@ -286,16 +291,14 @@ public interface AsyncAdmin {
* @param columnFamily column family within a table. If not present, compact the table's all
* column families.
*/
- CompletableFuture<Void> compact(TableName tableName, Optional<byte[]> columnFamily);
+ CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily);
/**
* Compact an individual region. When the returned CompletableFuture is done, it only means the
* compact request was sent to HBase and may need some time to finish the compact operation.
* @param regionName region to compact
*/
- default CompletableFuture<Void> compactRegion(byte[] regionName) {
- return compactRegion(regionName, Optional.empty());
- }
+ CompletableFuture<Void> compactRegion(byte[] regionName);
/**
* Compact a column family within a region. When the returned CompletableFuture is done, it only
@@ -305,16 +308,14 @@ public interface AsyncAdmin {
* @param columnFamily column family within a region. If not present, compact the region's all
* column families.
*/
- CompletableFuture<Void> compactRegion(byte[] regionName, Optional<byte[]> columnFamily);
+ CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily);
/**
* Major compact a table. When the returned CompletableFuture is done, it only means the compact
* request was sent to HBase and may need some time to finish the compact operation.
* @param tableName table to major compact
*/
- default CompletableFuture<Void> majorCompact(TableName tableName) {
- return majorCompact(tableName, Optional.empty());
- }
+ CompletableFuture<Void> majorCompact(TableName tableName);
/**
* Major compact a column family within a table. When the returned CompletableFuture is done, it
@@ -324,16 +325,14 @@ public interface AsyncAdmin {
* @param columnFamily column family within a table. If not present, major compact the table's all
* column families.
*/
- CompletableFuture<Void> majorCompact(TableName tableName, Optional<byte[]> columnFamily);
+ CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily);
/**
* Major compact a region. When the returned CompletableFuture is done, it only means the compact
* request was sent to HBase and may need some time to finish the compact operation.
* @param regionName region to major compact
*/
- default CompletableFuture<Void> majorCompactRegion(byte[] regionName) {
- return majorCompactRegion(regionName, Optional.empty());
- }
+ CompletableFuture<Void> majorCompactRegion(byte[] regionName);
/**
* Major compact a column family within region. When the returned CompletableFuture is done, it
@@ -343,7 +342,7 @@ public interface AsyncAdmin {
* @param columnFamily column family within a region. If not present, major compact the region's
* all column families.
*/
- CompletableFuture<Void> majorCompactRegion(byte[] regionName, Optional<byte[]> columnFamily);
+ CompletableFuture<Void> majorCompactRegion(byte[] regionName, byte[] columnFamily);
/**
* Compact all regions on the region server.
@@ -405,9 +404,7 @@ public interface AsyncAdmin {
* Split an individual region.
* @param regionName region to split
*/
- default CompletableFuture<Void> splitRegion(byte[] regionName) {
- return splitRegion(regionName, Optional.empty());
- }
+ CompletableFuture<Void> splitRegion(byte[] regionName);
/**
* Split a table.
@@ -422,7 +419,7 @@ public interface AsyncAdmin {
* @param splitPoint the explicit position to split on. If not present, it will decide by region
* server.
*/
- CompletableFuture<Void> splitRegion(byte[] regionName, Optional<byte[]> splitPoint);
+ CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint);
/**
* @param regionName Encoded or full name of region to assign.
@@ -432,7 +429,7 @@ public interface AsyncAdmin {
/**
* Unassign a region from current hosting regionserver. Region will then be assigned to a
* regionserver chosen at random. Region could be reassigned back to the same server. Use
- * {@link #move(byte[], Optional)} if you want to control the region movement.
+ * {@link #move(byte[], ServerName)} if you want to control the region movement.
* @param regionName Encoded or full name of region to unassign. Will clear any existing
* RegionPlan if one found.
* @param forcible If true, force unassign (Will remove region from regions-in-transition too if
@@ -452,13 +449,19 @@ public interface AsyncAdmin {
CompletableFuture<Void> offline(byte[] regionName);
/**
+ * Move the region <code>r</code> to a random server.
+ * @param regionName Encoded or full name of region to move.
+ */
+ CompletableFuture<Void> move(byte[] regionName);
+
+ /**
* Move the region <code>r</code> to <code>dest</code>.
* @param regionName Encoded or full name of region to move.
* @param destServerName The servername of the destination regionserver. If not present, we'll
* assign to a random server. A server name is made of host, port and startcode. Here is
* an example: <code> host187.example.com,60020,1289493121758</code>
*/
- CompletableFuture<Void> move(byte[] regionName, Optional<ServerName> destServerName);
+ CompletableFuture<Void> move(byte[] regionName, ServerName destServerName);
/**
* Apply the new quota settings.
@@ -535,9 +538,7 @@ public interface AsyncAdmin {
* @return a list of replication peers description. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
- default CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers() {
- return listReplicationPeers(Optional.empty());
- }
+ CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers();
/**
* Return a list of replication peers.
@@ -545,8 +546,7 @@ public interface AsyncAdmin {
* @return a list of replication peers description. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
- CompletableFuture<List<ReplicationPeerDescription>>
- listReplicationPeers(Optional<Pattern> pattern);
+ CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(Pattern pattern);
/**
* Find all table and column families that are replicated from this cluster
@@ -652,16 +652,22 @@ public interface AsyncAdmin {
* @return a list of snapshot descriptors for completed snapshots wrapped by a
* {@link CompletableFuture}
*/
- default CompletableFuture<List<SnapshotDescription>> listSnapshots() {
- return listSnapshots(Optional.empty());
- }
+ CompletableFuture<List<SnapshotDescription>> listSnapshots();
/**
* List all the completed snapshots matching the given pattern.
* @param pattern The compiled regular expression to match against
* @return - returns a List of SnapshotDescription wrapped by a {@link CompletableFuture}
*/
- CompletableFuture<List<SnapshotDescription>> listSnapshots(Optional<Pattern> pattern);
+ CompletableFuture<List<SnapshotDescription>> listSnapshots(Pattern pattern);
+
+ /**
+ * List all the completed snapshots matching the given table name pattern.
+ * @param tableNamePattern The compiled table name regular expression to match against
+ * @return - returns a List of completed SnapshotDescription wrapped by a
+ * {@link CompletableFuture}
+ */
+ CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern tableNamePattern);
/**
* List all the completed snapshots matching the given table name regular expression and snapshot
@@ -681,12 +687,21 @@ public interface AsyncAdmin {
CompletableFuture<Void> deleteSnapshot(String snapshotName);
/**
+ * Delete all existing snapshots.
+ */
+ CompletableFuture<Void> deleteSnapshots();
+
+ /**
* Delete existing snapshots whose names match the pattern passed.
* @param pattern pattern for names of the snapshot to match
*/
- default CompletableFuture<Void> deleteSnapshots(Pattern pattern) {
- return deleteTableSnapshots(null, pattern);
- }
+ CompletableFuture<Void> deleteSnapshots(Pattern pattern);
+
+ /**
+ * Delete all existing snapshots matching the given table name pattern.
+ * @param tableNamePattern The compiled table name regular expression to match against
+ */
+ CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern);
/**
* Delete all existing snapshots matching the given table name regular expression and snapshot
@@ -823,15 +838,6 @@ public interface AsyncAdmin {
}
/**
- * Get a list of {@link RegionLoad} of all regions hosted on a region server.
- * @param serverName
- * @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
- */
- default CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName) {
- return getRegionLoads(serverName, Optional.empty());
- }
-
- /**
* Shuts down the HBase cluster.
*/
CompletableFuture<Void> shutdown();
@@ -878,13 +884,19 @@ public interface AsyncAdmin {
CompletableFuture<Void> clearCompactionQueues(ServerName serverName, Set<String> queues);
/**
+ * Get a list of {@link RegionLoad} of all regions hosted on a region server.
+ * @param serverName
+ * @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
+ */
+ CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName);
+
+ /**
* Get a list of {@link RegionLoad} of all regions hosted on a region server for a table.
* @param serverName
* @param tableName
* @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
*/
- CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName,
- Optional<TableName> tableName);
+ CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName, TableName tableName);
/**
* Check whether master is in maintenance mode
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 23dea81..04005eb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import com.google.protobuf.RpcChannel;
@@ -84,13 +83,23 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
+ public CompletableFuture<List<TableDescriptor>> listTables(boolean includeSysTables) {
+ return wrap(rawAdmin.listTables(includeSysTables));
+ }
+
+ @Override
+ public CompletableFuture<List<TableDescriptor>> listTables(Pattern pattern,
boolean includeSysTables) {
return wrap(rawAdmin.listTables(pattern, includeSysTables));
}
@Override
- public CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
+ public CompletableFuture<List<TableName>> listTableNames(boolean includeSysTables) {
+ return wrap(rawAdmin.listTableNames(includeSysTables));
+ }
+
+ @Override
+ public CompletableFuture<List<TableName>> listTableNames(Pattern pattern,
boolean includeSysTables) {
return wrap(rawAdmin.listTableNames(pattern, includeSysTables));
}
@@ -101,13 +110,18 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<Void> createTable(TableDescriptor desc) {
+ return wrap(rawAdmin.createTable(desc));
+ }
+
+ @Override
public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
int numRegions) {
return wrap(rawAdmin.createTable(desc, startKey, endKey, numRegions));
}
@Override
- public CompletableFuture<Void> createTable(TableDescriptor desc, Optional<byte[][]> splitKeys) {
+ public CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys) {
return wrap(rawAdmin.createTable(desc, splitKeys));
}
@@ -142,6 +156,11 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
+ return wrap(rawAdmin.isTableAvailable(tableName));
+ }
+
+ @Override
public CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys) {
return wrap(rawAdmin.isTableAvailable(tableName, splitKeys));
}
@@ -209,23 +228,42 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<Void> compact(TableName tableName, Optional<byte[]> columnFamily) {
+ public CompletableFuture<Void> compact(TableName tableName) {
+ return wrap(rawAdmin.compact(tableName));
+ }
+
+ @Override
+ public CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily) {
return wrap(rawAdmin.compact(tableName, columnFamily));
}
@Override
- public CompletableFuture<Void> compactRegion(byte[] regionName, Optional<byte[]> columnFamily) {
+ public CompletableFuture<Void> compactRegion(byte[] regionName) {
+ return wrap(rawAdmin.compactRegion(regionName));
+ }
+
+ @Override
+ public CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily) {
return wrap(rawAdmin.compactRegion(regionName, columnFamily));
}
@Override
- public CompletableFuture<Void> majorCompact(TableName tableName, Optional<byte[]> columnFamily) {
+ public CompletableFuture<Void> majorCompact(TableName tableName) {
+ return wrap(rawAdmin.majorCompact(tableName));
+ }
+
+ @Override
+ public CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily) {
return wrap(rawAdmin.majorCompact(tableName, columnFamily));
}
@Override
- public CompletableFuture<Void>
- majorCompactRegion(byte[] regionName, Optional<byte[]> columnFamily) {
+ public CompletableFuture<Void> majorCompactRegion(byte[] regionName) {
+ return wrap(rawAdmin.majorCompactRegion(regionName));
+ }
+
+ @Override
+ public CompletableFuture<Void> majorCompactRegion(byte[] regionName, byte[] columnFamily) {
return wrap(rawAdmin.majorCompactRegion(regionName, columnFamily));
}
@@ -276,7 +314,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<Void> splitRegion(byte[] regionName, Optional<byte[]> splitPoint) {
+ public CompletableFuture<Void> splitRegion(byte[] regionName) {
+ return wrap(rawAdmin.splitRegion(regionName));
+ }
+
+ @Override
+ public CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint) {
return wrap(rawAdmin.splitRegion(regionName, splitPoint));
}
@@ -296,7 +339,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<Void> move(byte[] regionName, Optional<ServerName> destServerName) {
+ public CompletableFuture<Void> move(byte[] regionName) {
+ return wrap(rawAdmin.move(regionName));
+ }
+
+ @Override
+ public CompletableFuture<Void> move(byte[] regionName, ServerName destServerName) {
return wrap(rawAdmin.move(regionName, destServerName));
}
@@ -355,8 +403,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(
- Optional<Pattern> pattern) {
+ public CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers() {
+ return wrap(rawAdmin.listReplicationPeers());
+ }
+
+ @Override
+ public CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(Pattern pattern) {
return wrap(rawAdmin.listReplicationPeers(pattern));
}
@@ -391,11 +443,21 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<List<SnapshotDescription>> listSnapshots(Optional<Pattern> pattern) {
+ public CompletableFuture<List<SnapshotDescription>> listSnapshots() {
+ return wrap(rawAdmin.listSnapshots());
+ }
+
+ @Override
+ public CompletableFuture<List<SnapshotDescription>> listSnapshots(Pattern pattern) {
return wrap(rawAdmin.listSnapshots(pattern));
}
@Override
+ public CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern tableNamePattern) {
+ return wrap(rawAdmin.listTableSnapshots(tableNamePattern));
+ }
+
+ @Override
public CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern tableNamePattern,
Pattern snapshotNamePattern) {
return wrap(rawAdmin.listTableSnapshots(tableNamePattern, snapshotNamePattern));
@@ -407,6 +469,21 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<Void> deleteSnapshots() {
+ return wrap(rawAdmin.deleteSnapshots());
+ }
+
+ @Override
+ public CompletableFuture<Void> deleteSnapshots(Pattern pattern) {
+ return wrap(rawAdmin.deleteSnapshots(pattern));
+ }
+
+ @Override
+ public CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern) {
+ return wrap(rawAdmin.deleteTableSnapshots(tableNamePattern));
+ }
+
+ @Override
public CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern,
Pattern snapshotNamePattern) {
return wrap(rawAdmin.deleteTableSnapshots(tableNamePattern, snapshotNamePattern));
@@ -513,8 +590,13 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName) {
+ return wrap(rawAdmin.getRegionLoads(serverName));
+ }
+
+ @Override
public CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName,
- Optional<TableName> tableName) {
+ TableName tableName) {
return wrap(rawAdmin.getRegionLoads(serverName, tableName));
}
@@ -625,4 +707,4 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
public CompletableFuture<List<ServerName>> clearDeadServers(List<ServerName> servers) {
return wrap(rawAdmin.clearDeadServers(servers));
}
-}
+}
\ No newline at end of file
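Note on the API change above: every Optional-valued parameter on AsyncAdmin is replaced by a pair of explicit overloads, with AsyncHBaseAdmin delegating both to RawAsyncHBaseAdmin. A minimal caller-side sketch of the resulting API (the admin instance and the "test_.*" pattern are hypothetical):

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class ListTablesSketch {
  // Before: admin.listTables(Optional.empty(), false) or Optional.of(pattern).
  // After: pick the overload; the pattern variant rejects null arguments.
  static CompletableFuture<List<TableDescriptor>> userTables(AsyncAdmin admin) {
    return admin.listTables(false);
  }
  static CompletableFuture<List<TableDescriptor>> testTables(AsyncAdmin admin) {
    return admin.listTables(Pattern.compile("test_.*"), false);
  }
}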
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
index b687668..9c45883 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
@@ -427,8 +427,8 @@ class AsyncRpcRetryingCallerFactory {
public AsyncAdminRequestRetryingCaller<T> build() {
return new AsyncAdminRequestRetryingCaller<T>(retryTimer, conn, pauseNs, maxAttempts,
- operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, serverName, checkNotNull(callable,
- "action is null"));
+ operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, checkNotNull(serverName,
+ "serverName is null"), checkNotNull(callable, "action is null"));
}
public CompletableFuture<T> call() {
@@ -488,8 +488,8 @@ class AsyncRpcRetryingCallerFactory {
public AsyncServerRequestRpcRetryingCaller<T> build() {
return new AsyncServerRequestRpcRetryingCaller<T>(retryTimer, conn, pauseNs, maxAttempts,
- operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, serverName, checkNotNull(callable,
- "action is null"));
+ operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, checkNotNull(serverName,
+ "serverName is null"), checkNotNull(callable, "action is null"));
}
public CompletableFuture<T> call() {
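The two build() fixes above make the factory fail at construction time when no serverName was supplied, mirroring the existing "action is null" guard. A self-contained sketch of the same fail-fast builder style (this CallerBuilder is illustrative, not the HBase class):

import static com.google.common.base.Preconditions.checkNotNull;

class CallerBuilder {
  private String serverName;
  private Runnable callable;
  CallerBuilder serverName(String serverName) {
    this.serverName = serverName;
    return this;
  }
  CallerBuilder action(Runnable callable) {
    this.callable = callable;
    return this;
  }
  Runnable build() {
    // Validate here so a misconfigured builder fails with a descriptive
    // message instead of an NPE deep inside the RPC machinery.
    checkNotNull(serverName, "serverName is null");
    return checkNotNull(callable, "action is null");
  }
}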
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index c090325..556e564 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1371,14 +1371,14 @@ public class HBaseAdmin implements Admin {
}
@Override
- public void move(final byte [] encodedRegionName, final byte [] destServerName)
- throws IOException {
+ public void move(final byte[] encodedRegionName, final byte[] destServerName) throws IOException {
executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
@Override
protected Void rpcCall() throws Exception {
setPriority(encodedRegionName);
MoveRegionRequest request =
- RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
+ RequestConverter.buildMoveRegionRequest(encodedRegionName,
+ destServerName != null ? ServerName.valueOf(Bytes.toString(destServerName)) : null);
master.moveRegion(getRpcController(), request);
return null;
}
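Since buildMoveRegionRequest now takes a ServerName rather than raw bytes, the synchronous admin parses the byte[] form at the call site. A small sketch of that conversion (the server string is invented; ServerName.valueOf expects the host,port,startcode form):

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;

public class MoveArgSketch {
  public static void main(String[] args) {
    byte[] destServerName = Bytes.toBytes("host1.example.com,16020,1509999999999");
    // null means "let the master pick a destination server"
    ServerName dest = destServerName == null
        ? null : ServerName.valueOf(Bytes.toString(destServerName));
    System.out.println(dest);
  }
}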
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 1622497..1d80797 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -26,7 +26,6 @@ import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -82,9 +81,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
-
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
import org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
@@ -245,6 +242,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Updat
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
import com.google.protobuf.Message;
import com.google.protobuf.RpcChannel;
@@ -385,28 +383,54 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
+ public CompletableFuture<List<TableDescriptor>> listTables(boolean includeSysTables) {
+ return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(null,
+ includeSysTables));
+ }
+
+ /**
+ * @see #listTables(boolean)
+ */
+ @Override
+ public CompletableFuture<List<TableDescriptor>> listTables(Pattern pattern,
boolean includeSysTables) {
+ Preconditions.checkNotNull(pattern,
+ "pattern is null. If you don't specify a pattern, use listTables(boolean) instead");
+ return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(pattern,
+ includeSysTables));
+ }
+
+ private CompletableFuture<List<TableDescriptor>>
+ getTableDescriptors(GetTableDescriptorsRequest request) {
return this.<List<TableDescriptor>> newMasterCaller()
.action((controller, stub) -> this
.<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableDescriptor>> call(
- controller, stub,
- RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables),
- (s, c, req, done) -> s.getTableDescriptors(c, req, done),
+ controller, stub, request, (s, c, req, done) -> s.getTableDescriptors(c, req, done),
(resp) -> ProtobufUtil.toTableDescriptorList(resp)))
.call();
}
@Override
- public CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
- boolean includeSysTables) {
- return this.<List<TableName>> newMasterCaller()
- .action((controller, stub) -> this
- .<GetTableNamesRequest, GetTableNamesResponse, List<TableName>> call(controller, stub,
- RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables),
- (s, c, req, done) -> s.getTableNames(c, req, done),
- (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList())))
- .call();
+ public CompletableFuture<List<TableName>> listTableNames(boolean includeSysTables) {
+ return getTableNames(RequestConverter.buildGetTableNamesRequest(null, includeSysTables));
+ }
+
+ @Override
+ public CompletableFuture<List<TableName>>
+ listTableNames(Pattern pattern, boolean includeSysTables) {
+ Preconditions.checkNotNull(pattern,
+ "pattern is null. If you don't specify a pattern, use listTableNames(boolean) instead");
+ return getTableNames(RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables));
+ }
+
+ private CompletableFuture<List<TableName>> getTableNames(GetTableNamesRequest request) {
+ return this
+ .<List<TableName>> newMasterCaller()
+ .action(
+ (controller, stub) -> this
+ .<GetTableNamesRequest, GetTableNamesResponse, List<TableName>> call(controller,
+ stub, request, (s, c, req, done) -> s.getTableNames(c, req, done),
+ (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))).call();
}
@Override
@@ -433,31 +457,41 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<Void> createTable(TableDescriptor desc) {
+ return createTable(desc.getTableName(),
+ RequestConverter.buildCreateTableRequest(desc, null, ng.getNonceGroup(), ng.newNonce()));
+ }
+
+ @Override
public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
int numRegions) {
try {
- return createTable(desc, Optional.of(getSplitKeys(startKey, endKey, numRegions)));
+ return createTable(desc, getSplitKeys(startKey, endKey, numRegions));
} catch (IllegalArgumentException e) {
return failedFuture(e);
}
}
@Override
- public CompletableFuture<Void> createTable(TableDescriptor desc, Optional<byte[][]> splitKeys) {
- if (desc.getTableName() == null) {
- return failedFuture(new IllegalArgumentException("TableName cannot be null"));
- }
+ public CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys) {
+ Preconditions.checkNotNull(splitKeys, "splitKeys is null. If you don't specify splitKeys,"
+ + " use createTable(TableDescriptor) instead");
try {
- splitKeys.ifPresent(keys -> verifySplitKeys(keys));
- return this.<CreateTableRequest, CreateTableResponse> procedureCall(RequestConverter
- .buildCreateTableRequest(desc, splitKeys, ng.getNonceGroup(), ng.newNonce()), (s, c, req,
- done) -> s.createTable(c, req, done), (resp) -> resp.getProcId(),
- new CreateTableProcedureBiConsumer(this, desc.getTableName()));
+ verifySplitKeys(splitKeys);
+ return createTable(desc.getTableName(), RequestConverter.buildCreateTableRequest(desc,
+ splitKeys, ng.getNonceGroup(), ng.newNonce()));
} catch (IllegalArgumentException e) {
return failedFuture(e);
}
}
+ private CompletableFuture<Void> createTable(TableName tableName, CreateTableRequest request) {
+ Preconditions.checkNotNull(tableName, "table name is null");
+ return this.<CreateTableRequest, CreateTableResponse> procedureCall(request,
+ (s, c, req, done) -> s.createTable(c, req, done), (resp) -> resp.getProcId(),
+ new CreateTableProcedureBiConsumer(this, tableName));
+ }
+
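Usage-wise, createTable(TableDescriptor) now covers the old createTable(desc, Optional.empty()) case, while the byte[][] overload keeps the split-key validation. A hypothetical example, assuming the descriptor builder API on this branch (TableDescriptorBuilder/ColumnFamilyDescriptorBuilder):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void create(AsyncAdmin admin) {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    byte[][] splitKeys = { Bytes.toBytes("b"), Bytes.toBytes("m") };
    admin.createTable(desc, splitKeys).join(); // pre-split into three regions
    // admin.createTable(desc).join();         // single-region variant
  }
}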
@Override
public CompletableFuture<Void> deleteTable(TableName tableName) {
return this.<DeleteTableRequest, DeleteTableResponse> procedureCall(RequestConverter
@@ -526,11 +560,18 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
@Override
public CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
- return isTableAvailable(tableName, null);
+ return isTableAvailable(tableName, Optional.empty());
}
@Override
public CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys) {
+ Preconditions.checkNotNull(splitKeys, "splitKeys is null. If you don't specify splitKeys,"
+ + " use isTableAvailable(TableName) instead");
+ return isTableAvailable(tableName, Optional.of(splitKeys));
+ }
+
+ private CompletableFuture<Boolean> isTableAvailable(TableName tableName,
+ Optional<byte[][]> splitKeys) {
CompletableFuture<Boolean> future = new CompletableFuture<>();
isTableEnabled(tableName).whenComplete(
(enabled, error) -> {
@@ -548,54 +589,46 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
future.completeExceptionally(error1);
return;
}
- int notDeployed = 0;
- int regionCount = 0;
- for (HRegionLocation location : locations) {
- RegionInfo info = location.getRegionInfo();
- if (location.getServerName() == null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Table " + tableName + " has not deployed region "
- + info.getEncodedName());
- }
- notDeployed++;
- } else if (splitKeys != null
- && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
- for (byte[] splitKey : splitKeys) {
- // Just check if the splitkey is available
- if (Bytes.equals(info.getStartKey(), splitKey)) {
- regionCount++;
- break;
- }
- }
- } else {
- // Always empty start row should be counted
- regionCount++;
- }
- }
- if (notDeployed > 0) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
- }
- future.complete(false);
- } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
+ List<HRegionLocation> notDeployedRegions =
+ locations.stream().filter(loc -> loc.getServerName() == null)
+ .collect(Collectors.toList());
+ if (notDeployedRegions.size() > 0) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Table " + tableName + " expected to have "
- + (splitKeys.length + 1) + " regions, but only " + regionCount
- + " available");
+ LOG.debug("Table " + tableName + " has " + notDeployedRegions.size()
+ + " regions");
}
future.complete(false);
- } else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Table " + tableName + " should be available");
- }
- future.complete(true);
+ return;
}
+
+ Optional<Boolean> available =
+ splitKeys.map(keys -> compareRegionsWithSplitKeys(locations, keys));
+ future.complete(available.orElse(true));
});
}
});
return future;
}
+ private boolean compareRegionsWithSplitKeys(List<HRegionLocation> locations, byte[][] splitKeys) {
+ int regionCount = 0;
+ for (HRegionLocation location : locations) {
+ RegionInfo info = location.getRegion();
+ if (Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
+ regionCount++;
+ continue;
+ }
+ for (byte[] splitKey : splitKeys) {
+ // Just check if the splitkey is available
+ if (Bytes.equals(info.getStartKey(), splitKey)) {
+ regionCount++;
+ break;
+ }
+ }
+ }
+ return regionCount == splitKeys.length + 1;
+ }
+
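The extracted compareRegionsWithSplitKeys encodes the invariant that n split keys define n + 1 regions: the region with the empty start key always counts, plus one region per matched split key. A standalone illustration of the counting rule (keys invented for the example):

import java.util.Arrays;
import java.util.List;

public class SplitKeyCountSketch {
  public static void main(String[] args) {
    byte[][] splitKeys = { { 'b' }, { 'm' } };       // 2 split keys => 3 regions expected
    List<byte[]> regionStartKeys = Arrays.asList(
        new byte[0],                                  // first region: empty start key
        new byte[] { 'b' }, new byte[] { 'm' });      // one region per split key
    int count = 0;
    for (byte[] start : regionStartKeys) {
      if (start.length == 0) { count++; continue; }   // always count the empty start row
      for (byte[] key : splitKeys) {
        if (Arrays.equals(start, key)) { count++; break; }
      }
    }
    System.out.println(count == splitKeys.length + 1); // true: table fully available
  }
}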
@Override
public CompletableFuture<Void> addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) {
return this.<AddColumnRequest, AddColumnResponse> procedureCall(
@@ -757,22 +790,50 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<Void> compact(TableName tableName, Optional<byte[]> columnFamily) {
+ public CompletableFuture<Void> compact(TableName tableName) {
+ return compact(tableName, null, false, CompactType.NORMAL);
+ }
+
+ @Override
+ public CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily) {
+ Preconditions.checkNotNull(columnFamily,
+ "columnFamily is null. If you don't specify a columnFamily, use compact(TableName) instead");
return compact(tableName, columnFamily, false, CompactType.NORMAL);
}
@Override
- public CompletableFuture<Void> compactRegion(byte[] regionName, Optional<byte[]> columnFamily) {
+ public CompletableFuture<Void> compactRegion(byte[] regionName) {
+ return compactRegion(regionName, null, false);
+ }
+
+ @Override
+ public CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily) {
+ Preconditions.checkNotNull(columnFamily, "columnFamily is null."
+ + " If you don't specify a columnFamily, use compactRegion(regionName) instead");
return compactRegion(regionName, columnFamily, false);
}
@Override
- public CompletableFuture<Void> majorCompact(TableName tableName, Optional<byte[]> columnFamily) {
+ public CompletableFuture<Void> majorCompact(TableName tableName) {
+ return compact(tableName, null, true, CompactType.NORMAL);
+ }
+
+ @Override
+ public CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily) {
+ Preconditions.checkNotNull(columnFamily, "columnFamily is null."
+ + " If you don't specify a columnFamily, use majorCompact(TableName) instead");
return compact(tableName, columnFamily, true, CompactType.NORMAL);
}
@Override
- public CompletableFuture<Void> majorCompactRegion(byte[] regionName, Optional<byte[]> columnFamily) {
+ public CompletableFuture<Void> majorCompactRegion(byte[] regionName) {
+ return compactRegion(regionName, null, true);
+ }
+
+ @Override
+ public CompletableFuture<Void> majorCompactRegion(byte[] regionName, byte[] columnFamily) {
+ Preconditions.checkNotNull(columnFamily, "columnFamily is null."
+ + " If you don't specify a columnFamily, use majorCompactRegion(regionName) instead");
return compactRegion(regionName, columnFamily, true);
}
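The checkNotNull messages in the compaction methods deliberately name the overload to use instead, so passing null fails with guidance rather than an anonymous NPE. A tiny standalone demonstration of that failure mode, using Guava Preconditions as the patch does:

import com.google.common.base.Preconditions;

public class OverloadGuardSketch {
  public static void main(String[] args) {
    byte[] columnFamily = null;
    try {
      Preconditions.checkNotNull(columnFamily,
          "columnFamily is null. If you don't specify a columnFamily, use compact(TableName) instead");
    } catch (NullPointerException e) {
      System.out.println(e.getMessage()); // points the caller at the right overload
    }
  }
}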
@@ -795,7 +856,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
List<CompletableFuture<Void>> compactFutures = new ArrayList<>();
if (hRegionInfos != null) {
- hRegionInfos.forEach(region -> compactFutures.add(compact(sn, region, major, Optional.empty())));
+ hRegionInfos.forEach(region -> compactFutures.add(compact(sn, region, major, null)));
}
CompletableFuture
.allOf(compactFutures.toArray(new CompletableFuture<?>[compactFutures.size()]))
@@ -810,7 +871,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
return future;
}
- private CompletableFuture<Void> compactRegion(byte[] regionName, Optional<byte[]> columnFamily,
+ private CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily,
boolean major) {
CompletableFuture<Void> future = new CompletableFuture<>();
getRegionLocation(regionName).whenComplete(
@@ -868,7 +929,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
/**
* Compact a column family of a table. Asynchronous operation, even if CompletableFuture.get() is called.
*/
- private CompletableFuture<Void> compact(final TableName tableName, Optional<byte[]> columnFamily,
+ private CompletableFuture<Void> compact(final TableName tableName, byte[] columnFamily,
final boolean major, CompactType compactType) {
if (CompactType.MOB.equals(compactType)) {
// TODO support MOB compact.
@@ -905,7 +966,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
* Compact the region at specific region server.
*/
private CompletableFuture<Void> compact(final ServerName sn, final RegionInfo hri,
- final boolean major, Optional<byte[]> columnFamily) {
+ final boolean major, byte[] columnFamily) {
return this
.<Void> newAdminCaller()
.serverName(sn)
@@ -1078,7 +1139,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
if (hri == null || hri.isSplitParent()
|| hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID)
continue;
- splitFutures.add(split(hri, Optional.empty()));
+ splitFutures.add(split(hri, null));
}
}
}
@@ -1114,7 +1175,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
result.completeExceptionally(new IllegalArgumentException(
"Region does not found: rowKey=" + Bytes.toStringBinary(splitPoint)));
} else {
- splitRegion(loc.getRegionInfo().getRegionName(), Optional.of(splitPoint))
+ splitRegion(loc.getRegionInfo().getRegionName(), splitPoint)
.whenComplete((ret, err2) -> {
if (err2 != null) {
result.completeExceptionally(err2);
@@ -1129,11 +1190,11 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<Void> splitRegion(byte[] regionName, Optional<byte[]> splitPoint) {
+ public CompletableFuture<Void> splitRegion(byte[] regionName) {
CompletableFuture<Void> future = new CompletableFuture<>();
getRegionLocation(regionName).whenComplete(
(location, err) -> {
- RegionInfo regionInfo = location.getRegionInfo();
+ RegionInfo regionInfo = location.getRegion();
if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
future.completeExceptionally(new IllegalArgumentException(
"Can't split replicas directly. "
@@ -1146,7 +1207,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
.toStringBinary(regionName)));
return;
}
- split(regionInfo, splitPoint).whenComplete((ret, err2) -> {
+ split(regionInfo, null).whenComplete((ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
@@ -1157,20 +1218,50 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
return future;
}
- private CompletableFuture<Void> split(final RegionInfo hri,
- Optional<byte[]> splitPoint) {
- if (hri.getStartKey() != null && splitPoint.isPresent()
- && Bytes.compareTo(hri.getStartKey(), splitPoint.get()) == 0) {
- return failedFuture(new IllegalArgumentException(
- "should not give a splitkey which equals to startkey!"));
- }
+ @Override
+ public CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint) {
+ Preconditions.checkNotNull(splitPoint,
+ "splitPoint is null. If you don't specify a splitPoint, use splitRegion(byte[]) instead");
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ getRegionLocation(regionName).whenComplete(
+ (location, err) -> {
+ RegionInfo regionInfo = location.getRegion();
+ if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
+ future.completeExceptionally(new IllegalArgumentException(
+ "Can't split replicas directly. "
+ + "Replicas are auto-split when their primary is split."));
+ return;
+ }
+ ServerName serverName = location.getServerName();
+ if (serverName == null) {
+ future.completeExceptionally(new NoServerForRegionException(Bytes
+ .toStringBinary(regionName)));
+ return;
+ }
+ if (regionInfo.getStartKey() != null
+ && Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0) {
+ future.completeExceptionally(new IllegalArgumentException(
+ "should not give a splitkey which equals to startkey!"));
+ return;
+ }
+ split(regionInfo, splitPoint).whenComplete((ret, err2) -> {
+ if (err2 != null) {
+ future.completeExceptionally(err2);
+ } else {
+ future.complete(ret);
+ }
+ });
+ });
+ return future;
+ }
+ private CompletableFuture<Void> split(final RegionInfo hri, byte[] splitPoint) {
CompletableFuture<Void> future = new CompletableFuture<>();
TableName tableName = hri.getTable();
SplitTableRegionRequest request = null;
try {
request = RequestConverter
- .buildSplitTableRegionRequest(hri, splitPoint.isPresent() ? splitPoint.get() : null,
+ .buildSplitTableRegionRequest(hri, splitPoint,
ng.getNonceGroup(), ng.newNonce());
} catch (DeserializationException e) {
future.completeExceptionally(e);
@@ -1266,7 +1357,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<Void> move(byte[] regionName, Optional<ServerName> destServerName) {
+ public CompletableFuture<Void> move(byte[] regionName) {
CompletableFuture<Void> future = new CompletableFuture<>();
getRegionInfo(regionName).whenComplete(
(regionInfo, err) -> {
@@ -1274,12 +1365,9 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
future.completeExceptionally(err);
return;
}
- this.<Void> newMasterCaller()
- .action(
- (controller, stub) -> this.<MoveRegionRequest, MoveRegionResponse, Void> call(
- controller, stub, RequestConverter.buildMoveRegionRequest(
- regionInfo.getEncodedNameAsBytes(), destServerName), (s, c, req, done) -> s
- .moveRegion(c, req, done), resp -> null)).call().whenComplete((ret, err2) -> {
+ moveRegion(
+ RequestConverter.buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), null))
+ .whenComplete((ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
@@ -1291,6 +1379,37 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<Void> move(byte[] regionName, ServerName destServerName) {
+ Preconditions.checkNotNull(destServerName,
+ "destServerName is null. If you don't specify a destServerName, use move(byte[]) instead");
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ getRegionInfo(regionName).whenComplete((regionInfo, err) -> {
+ if (err != null) {
+ future.completeExceptionally(err);
+ return;
+ }
+ moveRegion(
+ RequestConverter.buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), destServerName))
+ .whenComplete((ret, err2) -> {
+ if (err2 != null) {
+ future.completeExceptionally(err2);
+ } else {
+ future.complete(ret);
+ }
+ });
+ });
+ return future;
+ }
+
+ private CompletableFuture<Void> moveRegion(MoveRegionRequest request) {
+ return this
+ .<Void> newMasterCaller()
+ .action(
+ (controller, stub) -> this.<MoveRegionRequest, MoveRegionResponse, Void> call(controller,
+ stub, request, (s, c, req, done) -> s.moveRegion(c, req, done), resp -> null)).call();
+ }
+
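move(byte[]) and move(byte[], ServerName) now share the private moveRegion(MoveRegionRequest) call; the no-destination form sends a request without a destination so the master chooses one. Hypothetical usage (server values invented):

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class MoveSketch {
  static void move(AsyncAdmin admin, byte[] regionName) {
    admin.move(regionName).join(); // master picks the destination
    ServerName dest = ServerName.valueOf("host1.example.com", 16020, 1509999999999L);
    admin.move(regionName, dest).join(); // explicit destination
  }
}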
+ @Override
public CompletableFuture<Void> setQuota(QuotaSettings quota) {
return this
.<Void> newMasterCaller()
@@ -1452,7 +1571,19 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(Optional<Pattern> pattern) {
+ public CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers() {
+ return listReplicationPeers(RequestConverter.buildListReplicationPeersRequest(null));
+ }
+
+ @Override
+ public CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(Pattern pattern) {
+ Preconditions.checkNotNull(pattern,
+ "pattern is null. If you don't specify a pattern, use listReplicationPeers() instead");
+ return listReplicationPeers(RequestConverter.buildListReplicationPeersRequest(pattern));
+ }
+
+ private CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(
+ ListReplicationPeersRequest request) {
return this
.<List<ReplicationPeerDescription>> newMasterCaller()
.action(
@@ -1460,7 +1591,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
.<ListReplicationPeersRequest, ListReplicationPeersResponse, List<ReplicationPeerDescription>> call(
controller,
stub,
- RequestConverter.buildListReplicationPeersRequest(pattern),
+ request,
(s, c, req, done) -> s.listReplicationPeers(c, req, done),
(resp) -> resp.getPeerDescList().stream()
.map(ReplicationSerDeHelper::toReplicationPeerDescription)
@@ -1570,7 +1701,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
@Override
public CompletableFuture<Void> restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) {
CompletableFuture<Void> future = new CompletableFuture<>();
- listSnapshots(Optional.of(Pattern.compile(snapshotName))).whenComplete(
+ listSnapshots(Pattern.compile(snapshotName)).whenComplete(
(snapshotDescriptions, err) -> {
if (err != null) {
future.completeExceptionally(err);
@@ -1715,37 +1846,47 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
- public CompletableFuture<List<SnapshotDescription>> listSnapshots(Optional<Pattern> pattern) {
- CompletableFuture<List<SnapshotDescription>> future = new CompletableFuture<>();
- this.<GetCompletedSnapshotsResponse> newMasterCaller()
- .action(
- (controller, stub) -> this
- .<GetCompletedSnapshotsRequest, GetCompletedSnapshotsResponse, GetCompletedSnapshotsResponse> call(
- controller, stub, GetCompletedSnapshotsRequest.newBuilder().build(), (s, c, req,
- done) -> s.getCompletedSnapshots(c, req, done), resp -> resp))
- .call()
- .whenComplete(
- (resp, err) -> {
- if (err != null) {
- future.completeExceptionally(err);
- return;
- }
- future.complete(resp
- .getSnapshotsList()
- .stream()
- .map(ProtobufUtil::createSnapshotDesc)
- .filter(
- snap -> pattern.isPresent() ? pattern.get().matcher(snap.getName()).matches()
- : true).collect(Collectors.toList()));
- });
- return future;
+ public CompletableFuture<List<SnapshotDescription>> listSnapshots() {
+ return getCompletedSnapshots(null);
+ }
+
+ @Override
+ public CompletableFuture<List<SnapshotDescription>> listSnapshots(Pattern pattern) {
+ Preconditions.checkNotNull(pattern,
+ "pattern is null. If you don't specify a pattern, use listSnapshots() instead");
+ return getCompletedSnapshots(pattern);
+ }
+
+ private CompletableFuture<List<SnapshotDescription>> getCompletedSnapshots(Pattern pattern) {
+ return this.<List<SnapshotDescription>> newMasterCaller().action((controller, stub) -> this
+ .<GetCompletedSnapshotsRequest, GetCompletedSnapshotsResponse, List<SnapshotDescription>>
+ call(controller, stub, GetCompletedSnapshotsRequest.newBuilder().build(),
+ (s, c, req, done) -> s.getCompletedSnapshots(c, req, done),
+ resp -> ProtobufUtil.toSnapshotDescriptionList(resp, pattern)))
+ .call();
+ }
+
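Worth noting: GetCompletedSnapshotsRequest carries no filter, so the master returns every completed snapshot and the Pattern is applied on the client in ProtobufUtil.toSnapshotDescriptionList (added later in this diff). The filtering rule, reduced to a standalone stream sketch with invented snapshot names:

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class SnapshotFilterSketch {
  public static void main(String[] args) {
    List<String> fromMaster = Arrays.asList("snap_users_1", "snap_orders_1", "backup_2017");
    Pattern pattern = Pattern.compile("snap_.*"); // a null pattern keeps everything
    List<String> kept = fromMaster.stream()
        .filter(name -> pattern == null || pattern.matcher(name).matches())
        .collect(Collectors.toList());
    System.out.println(kept); // [snap_users_1, snap_orders_1]
  }
}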
+ @Override
+ public CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern tableNamePattern) {
+ Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null."
+ + " If you don't specify a tableNamePattern, use listSnapshots() instead");
+ return getCompletedSnapshots(tableNamePattern, null);
}
@Override
public CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern tableNamePattern,
Pattern snapshotNamePattern) {
+ Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null."
+ + " If you don't specify a tableNamePattern, use listSnapshots(Pattern) instead");
+ Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null."
+ + " If you don't specify a snapshotNamePattern, use listTableSnapshots(Pattern) instead");
+ return getCompletedSnapshots(tableNamePattern, snapshotNamePattern);
+ }
+
+ private CompletableFuture<List<SnapshotDescription>> getCompletedSnapshots(
+ Pattern tableNamePattern, Pattern snapshotNamePattern) {
CompletableFuture<List<SnapshotDescription>> future = new CompletableFuture<>();
- listTableNames(Optional.ofNullable(tableNamePattern), false).whenComplete(
+ listTableNames(tableNamePattern, false).whenComplete(
(tableNames, err) -> {
if (err != null) {
future.completeExceptionally(err);
@@ -1755,7 +1896,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
future.complete(Collections.emptyList());
return;
}
- listSnapshots(Optional.ofNullable(snapshotNamePattern)).whenComplete(
+ getCompletedSnapshots(snapshotNamePattern).whenComplete(
(snapshotDescList, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
@@ -1779,31 +1920,59 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<Void> deleteSnapshots() {
+ return internalDeleteSnapshots(null, null);
+ }
+
+ @Override
public CompletableFuture<Void> deleteSnapshots(Pattern snapshotNamePattern) {
- return deleteTableSnapshots(null, snapshotNamePattern);
+ Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null."
+ + " If you don't specify a snapshotNamePattern, use deleteSnapshots() instead");
+ return internalDeleteSnapshots(null, snapshotNamePattern);
+ }
+
+ @Override
+ public CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern) {
+ Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null."
+ + " If you don't specify a tableNamePattern, use deleteSnapshots() instead");
+ return internalDeleteSnapshots(tableNamePattern, null);
}
@Override
public CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern,
Pattern snapshotNamePattern) {
+ Preconditions.checkNotNull(tableNamePattern, "tableNamePattern is null."
+ + " If you don't specify a tableNamePattern, use deleteSnapshots(Pattern) instead");
+ Preconditions.checkNotNull(snapshotNamePattern, "snapshotNamePattern is null."
+ + " If you don't specify a snapshotNamePattern, use deleteSnapshots(Pattern) instead");
+ return internalDeleteSnapshots(tableNamePattern, snapshotNamePattern);
+ }
+
+ private CompletableFuture<Void> internalDeleteSnapshots(Pattern tableNamePattern,
+ Pattern snapshotNamePattern) {
+ CompletableFuture<List<SnapshotDescription>> listSnapshotsFuture;
+ if (tableNamePattern == null) {
+ listSnapshotsFuture = getCompletedSnapshots(snapshotNamePattern);
+ } else {
+ listSnapshotsFuture = getCompletedSnapshots(tableNamePattern, snapshotNamePattern);
+ }
CompletableFuture<Void> future = new CompletableFuture<>();
- listTableSnapshots(tableNamePattern, snapshotNamePattern).whenComplete(
- ((snapshotDescriptions, err) -> {
- if (err != null) {
- future.completeExceptionally(err);
- return;
- }
- if (snapshotDescriptions == null || snapshotDescriptions.isEmpty()) {
- future.complete(null);
- return;
- }
- List<CompletableFuture<Void>> deleteSnapshotFutures = new ArrayList<>();
- snapshotDescriptions.forEach(snapDesc -> deleteSnapshotFutures
- .add(internalDeleteSnapshot(snapDesc)));
- CompletableFuture.allOf(
- deleteSnapshotFutures.toArray(new CompletableFuture<?>[deleteSnapshotFutures.size()]))
- .thenAccept(v -> future.complete(v));
- }));
+ listSnapshotsFuture.whenComplete(((snapshotDescriptions, err) -> {
+ if (err != null) {
+ future.completeExceptionally(err);
+ return;
+ }
+ if (snapshotDescriptions == null || snapshotDescriptions.isEmpty()) {
+ future.complete(null);
+ return;
+ }
+ List<CompletableFuture<Void>> deleteSnapshotFutures = new ArrayList<>();
+ snapshotDescriptions.forEach(snapDesc -> deleteSnapshotFutures
+ .add(internalDeleteSnapshot(snapDesc)));
+ CompletableFuture.allOf(
+ deleteSnapshotFutures.toArray(new CompletableFuture<?>[deleteSnapshotFutures.size()]))
+ .thenAccept(v -> future.complete(v));
+ }));
return future;
}
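internalDeleteSnapshots is the single sink for all four delete entry points: a null tableNamePattern lists by snapshot name only, otherwise snapshots are first narrowed to the matching tables; every match is then deleted and the per-snapshot futures are joined with CompletableFuture.allOf. The fan-out/join step, as a minimal sketch (deleteOne stands in for internalDeleteSnapshot):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class DeleteFanOutSketch {
  static CompletableFuture<Void> deleteOne(String snapshot) {
    return CompletableFuture.runAsync(() -> System.out.println("deleted " + snapshot));
  }
  public static void main(String[] args) {
    List<String> matched = Arrays.asList("snap_a", "snap_b");
    CompletableFuture<?>[] futures =
        matched.stream().map(DeleteFanOutSketch::deleteOne).toArray(CompletableFuture[]::new);
    CompletableFuture.allOf(futures).join(); // completes only when every delete has finished
  }
}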
@@ -2485,16 +2654,26 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
+ public CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName) {
+ return getRegionLoad(GetRegionLoadRequest.newBuilder().build(), serverName);
+ }
+
+ @Override
public CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName,
- Optional<TableName> tableName) {
- return this
- .<List<RegionLoad>> newAdminCaller()
- .action(
- (controller, stub) -> this
- .<GetRegionLoadRequest, GetRegionLoadResponse, List<RegionLoad>> adminCall(
- controller, stub, RequestConverter.buildGetRegionLoadRequest(tableName), (s, c,
- req, done) -> s.getRegionLoad(controller, req, done),
- ProtobufUtil::getRegionLoadInfo)).serverName(serverName).call();
+ TableName tableName) {
+ Preconditions.checkNotNull(tableName,
+ "tableName is null. If you don't specify a tableName, use getRegionLoads() instead");
+ return getRegionLoad(RequestConverter.buildGetRegionLoadRequest(tableName), serverName);
+ }
+
+ private CompletableFuture<List<RegionLoad>> getRegionLoad(GetRegionLoadRequest request,
+ ServerName serverName) {
+ return this.<List<RegionLoad>> newAdminCaller()
+ .action((controller, stub) -> this
+ .<GetRegionLoadRequest, GetRegionLoadResponse, List<RegionLoad>> adminCall(controller,
+ stub, request, (s, c, req, done) -> s.getRegionLoad(controller, req, done),
+ ProtobufUtil::getRegionLoadInfo))
+ .serverName(serverName).call();
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 9024cdd..7953b8a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -36,6 +36,7 @@ import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
+import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -158,6 +159,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
@@ -1733,7 +1735,7 @@ public final class ProtobufUtil {
final RpcController controller, final AdminService.BlockingInterface admin,
final TableName tableName) throws IOException {
GetRegionLoadRequest request =
- RequestConverter.buildGetRegionLoadRequest(Optional.ofNullable(tableName));
+ RequestConverter.buildGetRegionLoadRequest(tableName);
GetRegionLoadResponse response;
try {
response = admin.getRegionLoad(controller, request);
@@ -3376,4 +3378,11 @@ public final class ProtobufUtil {
}
return rib.build();
}
+
+ public static List<SnapshotDescription> toSnapshotDescriptionList(
+ GetCompletedSnapshotsResponse response, Pattern pattern) {
+ return response.getSnapshotsList().stream().map(ProtobufUtil::createSnapshotDesc)
+ .filter(snap -> pattern != null ? pattern.matcher(snap.getName()).matches() : true)
+ .collect(Collectors.toList());
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 72bd324..4ad28f2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -19,11 +19,9 @@ package org.apache.hadoop.hbase.shaded.protobuf;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
-import java.util.Optional;
import java.util.Set;
import java.util.regex.Pattern;
@@ -824,132 +822,112 @@ public final class RequestConverter {
* Create a protocol buffer GetRegionLoadRequest for all regions, or the regions of a table.
* @param tableName the table for which regionLoad should be obtained from RS
* @return a protocol buffer GetRegionLoadRequest
- * @deprecated use {@link #buildGetRegionLoadRequest(Optional)} instead.
*/
- @Deprecated
public static GetRegionLoadRequest buildGetRegionLoadRequest(final TableName tableName) {
- return buildGetRegionLoadRequest(Optional.ofNullable(tableName));
+ GetRegionLoadRequest.Builder builder = GetRegionLoadRequest.newBuilder();
+ if (tableName != null) {
+ builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
+ }
+ return builder.build();
}
/**
- * Create a protocol buffer GetRegionLoadRequest for all regions/regions of a table.
- * @param tableName the table for which regionLoad should be obtained from RS
- * @return a protocol buffer GetRegionLoadRequest
+ * Create a protocol buffer GetOnlineRegionRequest
+ * @return a protocol buffer GetOnlineRegionRequest
*/
- public static GetRegionLoadRequest buildGetRegionLoadRequest(Optional<TableName> tableName) {
- GetRegionLoadRequest.Builder builder = GetRegionLoadRequest.newBuilder();
- tableName.ifPresent(table -> builder.setTableName(ProtobufUtil.toProtoTableName(table)));
+ public static GetOnlineRegionRequest buildGetOnlineRegionRequest() {
+ return GetOnlineRegionRequest.newBuilder().build();
+ }
+
+ /**
+ * Create a protocol buffer FlushRegionRequest for a given region name
+ * @param regionName the name of the region to flush
+ * @return a protocol buffer FlushRegionRequest
+ */
+ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName) {
+ return buildFlushRegionRequest(regionName, false);
+ }
+
+ /**
+ * Create a protocol buffer FlushRegionRequest for a given region name
+ * @param regionName the name of the region to flush
+ * @param writeFlushWALMarker whether to write a marker to the WAL that the flush happened
+ * @return a protocol buffer FlushRegionRequest
+ */
+ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName,
+ boolean writeFlushWALMarker) {
+ FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder();
+ RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
+ builder.setRegion(region);
+ builder.setWriteFlushWalMarker(writeFlushWALMarker);
return builder.build();
}
- /**
- * Create a protocol buffer GetOnlineRegionRequest
- *
- * @return a protocol buffer GetOnlineRegionRequest
- */
- public static GetOnlineRegionRequest buildGetOnlineRegionRequest() {
- return GetOnlineRegionRequest.newBuilder().build();
- }
-
- /**
- * Create a protocol buffer FlushRegionRequest for a given region name
- *
- * @param regionName the name of the region to get info
- * @return a protocol buffer FlushRegionRequest
- */
- public static FlushRegionRequest
- buildFlushRegionRequest(final byte[] regionName) {
- return buildFlushRegionRequest(regionName, false);
- }
-
- /**
- * Create a protocol buffer FlushRegionRequest for a given region name
- *
- * @param regionName the name of the region to get info
- * @return a protocol buffer FlushRegionRequest
- */
- public static FlushRegionRequest
- buildFlushRegionRequest(final byte[] regionName, boolean writeFlushWALMarker) {
- FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder();
- RegionSpecifier region = buildRegionSpecifier(
- RegionSpecifierType.REGION_NAME, regionName);
- builder.setRegion(region);
- builder.setWriteFlushWalMarker(writeFlushWALMarker);
- return builder.build();
- }
-
- /**
- * Create a protocol buffer OpenRegionRequest to open a list of regions
- *
- * @param server the serverName for the RPC
- * @param regionOpenInfos info of a list of regions to open
- * @param openForReplay
- * @return a protocol buffer OpenRegionRequest
- */
- public static OpenRegionRequest
- buildOpenRegionRequest(ServerName server, final List<Pair<RegionInfo,
- List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
- OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
- for (Pair<RegionInfo, List<ServerName>> regionOpenInfo: regionOpenInfos) {
- builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(),
- regionOpenInfo.getSecond(), openForReplay));
- }
- if (server != null) {
- builder.setServerStartCode(server.getStartcode());
- }
- // send the master's wall clock time as well, so that the RS can refer to it
- builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
- return builder.build();
- }
-
- /**
- * Create a protocol buffer OpenRegionRequest for a given region
- *
- * @param server the serverName for the RPC
- * @param region the region to open
- * @param favoredNodes
- * @param openForReplay
- * @return a protocol buffer OpenRegionRequest
- */
- public static OpenRegionRequest buildOpenRegionRequest(ServerName server,
- final RegionInfo region, List<ServerName> favoredNodes,
- Boolean openForReplay) {
- OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
- builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes,
- openForReplay));
- if (server != null) {
- builder.setServerStartCode(server.getStartcode());
- }
- builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
- return builder.build();
- }
-
- /**
- * Create a protocol buffer UpdateFavoredNodesRequest to update a list of favorednode mappings
- * @param updateRegionInfos
- * @return a protocol buffer UpdateFavoredNodesRequest
- */
- public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest(
- final List<Pair<RegionInfo, List<ServerName>>> updateRegionInfos) {
- UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder();
- if (updateRegionInfos != null && !updateRegionInfos.isEmpty()) {
- RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder();
- for (Pair<RegionInfo, List<ServerName>> pair : updateRegionInfos) {
- builder.setRegion(ProtobufUtil.toRegionInfo(pair.getFirst()));
- for (ServerName server : pair.getSecond()) {
- builder.addFavoredNodes(ProtobufUtil.toServerName(server));
+ /**
+ * Create a protocol buffer OpenRegionRequest to open a list of regions
+ * @param server the serverName for the RPC
+ * @param regionOpenInfos info of a list of regions to open
+ * @param openForReplay whether open for replay
+ * @return a protocol buffer OpenRegionRequest
+ */
+ public static OpenRegionRequest buildOpenRegionRequest(ServerName server,
+ final List<Pair<RegionInfo, List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
+ OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
+ for (Pair<RegionInfo, List<ServerName>> regionOpenInfo : regionOpenInfos) {
+ builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(),
+ regionOpenInfo.getSecond(), openForReplay));
+ }
+ if (server != null) {
+ builder.setServerStartCode(server.getStartcode());
+ }
+ // send the master's wall clock time as well, so that the RS can refer to it
+ builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
+ return builder.build();
+ }
+
+ /**
+ * Create a protocol buffer OpenRegionRequest for a given region
+ * @param server the serverName for the RPC
+ * @param region the region to open
+ * @param favoredNodes a list of favored nodes
+ * @param openForReplay whether open for replay
+ * @return a protocol buffer OpenRegionRequest
+ */
+ public static OpenRegionRequest buildOpenRegionRequest(ServerName server,
+ final RegionInfo region, List<ServerName> favoredNodes, Boolean openForReplay) {
+ OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
+ builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes, openForReplay));
+ if (server != null) {
+ builder.setServerStartCode(server.getStartcode());
+ }
+ builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
+ return builder.build();
+ }
+
+ /**
+ * Create a protocol buffer UpdateFavoredNodesRequest to update a list of favored node mappings
+ * @param updateRegionInfos a list of favored node mappings
+ * @return a protocol buffer UpdateFavoredNodesRequest
+ */
+ public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest(
+ final List<Pair<RegionInfo, List<ServerName>>> updateRegionInfos) {
+ UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder();
+ if (updateRegionInfos != null && !updateRegionInfos.isEmpty()) {
+ RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder();
+ for (Pair<RegionInfo, List<ServerName>> pair : updateRegionInfos) {
+ builder.setRegion(ProtobufUtil.toRegionInfo(pair.getFirst()));
+ for (ServerName server : pair.getSecond()) {
+ builder.addFavoredNodes(ProtobufUtil.toServerName(server));
+ }
+ ubuilder.addUpdateInfo(builder.build());
+ builder.clear();
}
- ubuilder.addUpdateInfo(builder.build());
- builder.clear();
}
- }
- return ubuilder.build();
- }
+ return ubuilder.build();
+ }
/**
- * Create a WarmupRegionRequest for a given region name
- *
- * @param regionInfo Region we are warming up
+ * Create a WarmupRegionRequest for a given region
+ * @param regionInfo Region we are warming up
*/
public static WarmupRegionRequest buildWarmupRegionRequest(final RegionInfo regionInfo) {
WarmupRegionRequest.Builder builder = WarmupRegionRequest.newBuilder();
@@ -963,72 +941,57 @@ public final class RequestConverter {
* @param major indicator if it is a major compaction
* @param columnFamily the column family to compact, or null for all column families
* @return a CompactRegionRequest
- * @deprecated Use {@link #buildCompactRegionRequest(byte[], boolean, Optional)} instead.
*/
- @Deprecated
public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
byte[] columnFamily) {
- return buildCompactRegionRequest(regionName, major, Optional.ofNullable(columnFamily));
- }
-
- /**
- * Create a CompactRegionRequest for a given region name
- * @param regionName the name of the region to get info
- * @param major indicator if it is a major compaction
- * @param columnFamily
- * @return a CompactRegionRequest
- */
- public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
- Optional<byte[]> columnFamily) {
CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setMajor(major);
- columnFamily.ifPresent(family -> builder.setFamily(UnsafeByteOperations.unsafeWrap(family)));
+ if (columnFamily != null) {
+ builder.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily));
+ }
return builder.build();
}
- /**
- * @see {@link #buildRollWALWriterRequest()}
- */
- private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST =
- RollWALWriterRequest.newBuilder().build();
-
- /**
- * Create a new RollWALWriterRequest
- *
- * @return a ReplicateWALEntryRequest
- */
- public static RollWALWriterRequest buildRollWALWriterRequest() {
- return ROLL_WAL_WRITER_REQUEST;
- }
-
- /**
- * @see {@link #buildGetServerInfoRequest()}
- */
- private static GetServerInfoRequest GET_SERVER_INFO_REQUEST =
- GetServerInfoRequest.newBuilder().build();
-
- /**
- * Create a new GetServerInfoRequest
- *
- * @return a GetServerInfoRequest
- */
- public static GetServerInfoRequest buildGetServerInfoRequest() {
- return GET_SERVER_INFO_REQUEST;
- }
-
- /**
- * Create a new StopServerRequest
- *
- * @param reason the reason to stop the server
- * @return a StopServerRequest
- */
- public static StopServerRequest buildStopServerRequest(final String reason) {
- StopServerRequest.Builder builder = StopServerRequest.newBuilder();
- builder.setReason(reason);
- return builder.build();
- }
+ /**
+ * @see #buildRollWALWriterRequest()
+ */
+ private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = RollWALWriterRequest.newBuilder()
+ .build();
+
+ /**
+ * Create a new RollWALWriterRequest
+ * @return a RollWALWriterRequest
+ */
+ public static RollWALWriterRequest buildRollWALWriterRequest() {
+ return ROLL_WAL_WRITER_REQUEST;
+ }
+
+ /**
+ * @see #buildGetServerInfoRequest()
+ */
+ private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = GetServerInfoRequest.newBuilder()
+ .build();
+
+ /**
+ * Create a new GetServerInfoRequest
+ * @return a GetServerInfoRequest
+ */
+ public static GetServerInfoRequest buildGetServerInfoRequest() {
+ return GET_SERVER_INFO_REQUEST;
+ }
+
+ /**
+ * Create a new StopServerRequest
+ * @param reason the reason to stop the server
+ * @return a StopServerRequest
+ */
+ public static StopServerRequest buildStopServerRequest(final String reason) {
+ StopServerRequest.Builder builder = StopServerRequest.newBuilder();
+ builder.setReason(reason);
+ return builder.build();
+ }
//End utilities for Admin
@@ -1136,36 +1099,15 @@ public final class RequestConverter {
* @param encodedRegionName the encoded name of the region to move
* @param destServerName the destination server, or null to let the master choose
* @return A MoveRegionRequest
- * @throws DeserializationException
- * @deprecated Use {@link #buildMoveRegionRequest(byte[], Optional)} instead.
- */
- @Deprecated
- public static MoveRegionRequest buildMoveRegionRequest(
- final byte [] encodedRegionName, final byte [] destServerName) throws
- DeserializationException {
- MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder();
- builder.setRegion(
- buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME,encodedRegionName));
- if (destServerName != null) {
- builder.setDestServerName(
- ProtobufUtil.toServerName(ServerName.valueOf(Bytes.toString(destServerName))));
- }
- return builder.build();
- }
-
- /**
- * Create a protocol buffer MoveRegionRequest
- * @param encodedRegionName
- * @param destServerName
- * @return A MoveRegionRequest
*/
public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
- Optional<ServerName> destServerName) {
+ ServerName destServerName) {
MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder();
builder.setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME,
encodedRegionName));
- destServerName.ifPresent(serverName -> builder.setDestServerName(ProtobufUtil
- .toServerName(serverName)));
+ if (destServerName != null) {
+ builder.setDestServerName(ProtobufUtil.toServerName(destServerName));
+ }
return builder.build();
}
@@ -1320,21 +1262,13 @@ public final class RequestConverter {
final byte [][] splitKeys,
final long nonceGroup,
final long nonce) {
- return buildCreateTableRequest(tableDescriptor, Optional.ofNullable(splitKeys), nonceGroup, nonce);
- }
-
- /**
- * Creates a protocol buffer CreateTableRequest
- * @param tableDescriptor
- * @param splitKeys
- * @return a CreateTableRequest
- */
- public static CreateTableRequest buildCreateTableRequest(TableDescriptor tableDescriptor,
- Optional<byte[][]> splitKeys, long nonceGroup, long nonce) {
CreateTableRequest.Builder builder = CreateTableRequest.newBuilder();
builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
- splitKeys.ifPresent(keys -> Arrays.stream(keys).forEach(
- key -> builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key))));
+ if (splitKeys != null) {
+ for (byte[] key : splitKeys) {
+ builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key));
+ }
+ }
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
@@ -1396,25 +1330,13 @@ public final class RequestConverter {
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return a GetTableDescriptorsRequest
- * @deprecated Use {@link #buildGetTableDescriptorsRequest(Optional, boolean)} instead.
*/
- @Deprecated
public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(final Pattern pattern,
boolean includeSysTables) {
- return buildGetTableDescriptorsRequest(Optional.ofNullable(pattern), includeSysTables);
- }
-
- /**
- * Creates a protocol buffer GetTableDescriptorsRequest
- *
- * @param pattern The compiled regular expression to match against
- * @param includeSysTables False to match only against userspace tables
- * @return a GetTableDescriptorsRequest
- */
- public static GetTableDescriptorsRequest
- buildGetTableDescriptorsRequest(Optional<Pattern> pattern, boolean includeSysTables) {
GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder();
- pattern.ifPresent(p -> builder.setRegex(p.toString()));
+ if (pattern != null) {
+ builder.setRegex(pattern.toString());
+ }
builder.setIncludeSysTables(includeSysTables);
return builder.build();
}
@@ -1425,25 +1347,13 @@ public final class RequestConverter {
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return a GetTableNamesRequest
- * @deprecated Use {@link #buildGetTableNamesRequest(Optional, boolean)} instead.
*/
- @Deprecated
public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern pattern,
boolean includeSysTables) {
- return buildGetTableNamesRequest(Optional.ofNullable(pattern), includeSysTables);
- }
-
- /**
- * Creates a protocol buffer GetTableNamesRequest
- *
- * @param pattern The compiled regular expression to match against
- * @param includeSysTables False to match only against userspace tables
- * @return a GetTableNamesRequest
- */
- public static GetTableNamesRequest buildGetTableNamesRequest(Optional<Pattern> pattern,
- boolean includeSysTables) {
GetTableNamesRequest.Builder builder = GetTableNamesRequest.newBuilder();
- pattern.ifPresent(p -> builder.setRegex(p.toString()));
+ if (pattern != null) {
+ builder.setRegex(pattern.toString());
+ }
builder.setIncludeSysTables(includeSysTables);
return builder.build();
}
@@ -1745,18 +1655,11 @@ public final class RequestConverter {
return builder.build();
}
- /**
- * @deprecated Use {@link #buildListReplicationPeersRequest(Optional)} instead.
- */
- @Deprecated
public static ListReplicationPeersRequest buildListReplicationPeersRequest(Pattern pattern) {
- return buildListReplicationPeersRequest(Optional.ofNullable(pattern));
- }
-
- public static ListReplicationPeersRequest
- buildListReplicationPeersRequest(Optional<Pattern> pattern) {
ListReplicationPeersRequest.Builder builder = ListReplicationPeersRequest.newBuilder();
- pattern.ifPresent(p -> builder.setRegex(p.toString()));
+ if (pattern != null) {
+ builder.setRegex(pattern.toString());
+ }
return builder.build();
}
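Worth noting: the request carries only pattern.toString(), the regex source, so flags supplied to Pattern.compile(regex, flags) are dropped unless embedded in the source itself (e.g. (?i)). A sketch under that assumption:
ListReplicationPeersRequest allPeers =
    RequestConverter.buildListReplicationPeersRequest(null);
ListReplicationPeersRequest matching =
    RequestConverter.buildListReplicationPeersRequest(Pattern.compile("(?i)backup_.*"));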
@@ -1877,4 +1780,4 @@ public final class RequestConverter {
}
return pbServers;
}
-}
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/888f2335/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java
index 0d5c993..25953bc 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
@@ -138,7 +137,7 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS
conn.getAdmin(getLocation().getServerName());
CompactRegionRequest request =
RequestConverter.buildCompactRegionRequest(
- getLocation().getRegionInfo().getRegionName(), true, Optional.empty());
+ getLocation().getRegionInfo().getRegionName(), true, null);
server.compactRegion(null, request);
numCompactions.incrementAndGet();
return null;
[05/15] hbase git commit: HBASE-19160 expose CellComparator as
IA.Public
Posted by bu...@apache.org.
HBASE-19160 expose CellComparator as IA.Public
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9ee8e271
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9ee8e271
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9ee8e271
Branch: refs/heads/HBASE-19189
Commit: 9ee8e2714df54345743ddf18bf23899872930b2c
Parents: 888f233
Author: Mike Drob <md...@apache.org>
Authored: Thu Nov 2 16:16:43 2017 -0500
Committer: Mike Drob <md...@apache.org>
Committed: Mon Nov 6 10:08:14 2017 -0600
----------------------------------------------------------------------
.../hadoop/hbase/client/ConnectionUtils.java | 4 ++--
.../org/apache/hadoop/hbase/client/Result.java | 5 ++--
.../hadoop/hbase/filter/FilterListBase.java | 4 ++--
.../hadoop/hbase/filter/FuzzyRowFilter.java | 6 ++---
.../hbase/filter/InclusiveStopFilter.java | 4 ++--
.../org/apache/hadoop/hbase/CellComparator.java | 12 +++++++++-
.../java/org/apache/hadoop/hbase/CellUtil.java | 2 +-
.../java/org/apache/hadoop/hbase/KeyValue.java | 2 +-
.../io/encoding/BufferedDataBlockEncoder.java | 3 +--
.../apache/hadoop/hbase/TestCellComparator.java | 13 ++++++-----
.../hadoop/hbase/util/RedundantKVGenerator.java | 6 ++---
.../mapreduce/IntegrationTestImportTsv.java | 10 ++++----
.../hadoop/hbase/mapreduce/CellSortReducer.java | 4 ++--
.../hbase/mapreduce/HFileOutputFormat2.java | 6 ++---
.../apache/hadoop/hbase/mapreduce/Import.java | 6 ++---
.../hadoop/hbase/mapreduce/PutSortReducer.java | 4 ++--
.../hadoop/hbase/mapreduce/SyncTable.java | 8 +++----
.../hadoop/hbase/mapreduce/TextSortReducer.java | 4 ++--
.../hadoop/hbase/io/hfile/FixedFileTrailer.java | 2 +-
.../org/apache/hadoop/hbase/io/hfile/HFile.java | 4 +---
.../hbase/io/hfile/HFilePrettyPrinter.java | 10 ++++----
.../hadoop/hbase/io/hfile/HFileReaderImpl.java | 3 +--
.../hadoop/hbase/io/hfile/HFileWriterImpl.java | 3 +--
.../org/apache/hadoop/hbase/mob/MobUtils.java | 5 ++--
.../compactions/PartitionedMobCompactor.java | 3 ++-
.../hbase/regionserver/DefaultMemStore.java | 3 +--
.../hadoop/hbase/regionserver/HStore.java | 3 +--
.../hbase/regionserver/StoreFileReader.java | 5 ++--
.../hbase/regionserver/StoreFileWriter.java | 6 ++---
.../hbase/regionserver/wal/FSWALEntry.java | 6 ++---
.../hbase/util/CollectionBackedScanner.java | 5 ++--
.../hadoop/hbase/util/CompressionTest.java | 3 ++-
.../hadoop/hbase/HBaseTestingUtility.java | 2 +-
.../hbase/HFilePerformanceEvaluation.java | 2 +-
.../apache/hadoop/hbase/client/TestResult.java | 18 +++++++--------
.../apache/hadoop/hbase/filter/TestFilter.java | 13 ++++-------
.../hadoop/hbase/filter/TestFilterList.java | 24 ++++++++------------
.../hbase/regionserver/KeyValueScanFixture.java | 6 ++---
.../hbase/regionserver/TestCellFlatSet.java | 10 ++++----
.../regionserver/TestCompactingMemStore.java | 12 ++++------
.../regionserver/TestKeyValueScanFixture.java | 4 ++--
.../hbase/regionserver/TestStoreScanner.java | 21 ++++++++---------
.../AbstractTestScanQueryMatcher.java | 6 ++---
.../hadoop/hbase/spark/HBaseContext.scala | 2 +-
44 files changed, 133 insertions(+), 151 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 5e0e3b7..bc0ade2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -39,7 +39,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -336,7 +336,7 @@ public final class ConnectionUtils {
}
Cell[] rawCells = result.rawCells();
int index =
- Arrays.binarySearch(rawCells, keepCellsAfter, CellComparatorImpl.COMPARATOR::compareWithoutRow);
+ Arrays.binarySearch(rawCells, keepCellsAfter, CellComparator.getInstance()::compareWithoutRow);
if (index < 0) {
index = -index - 1;
} else {
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index cc21ec8..d30c25f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -35,7 +35,6 @@ import java.util.TreeMap;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
@@ -304,7 +303,7 @@ public class Result implements CellScannable, CellScanner {
qualifierNotNull, 0, qualifierNotNull.length);
// pos === ( -(insertion point) - 1)
- int pos = Arrays.binarySearch(kvs, searchTerm, CellComparatorImpl.COMPARATOR);
+ int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.getInstance());
// never will exact match
if (pos < 0) {
pos = (pos+1) * -1;
@@ -349,7 +348,7 @@ public class Result implements CellScannable, CellScanner {
qualifier, qoffset, qlength);
// pos === ( -(insertion point) - 1)
- int pos = Arrays.binarySearch(kvs, searchTerm, CellComparatorImpl.COMPARATOR);
+ int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.getInstance());
// never will exact match
if (pos < 0) {
pos = (pos+1) * -1;
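The insertion-point arithmetic deserves a gloss: Arrays.binarySearch returns -(insertion point) - 1 on a miss, so (pos+1) * -1 recovers the index of the first element greater than the search term. A standalone sketch with plain integers:
int[] sorted = { 10, 20, 30 };
int pos = java.util.Arrays.binarySearch(sorted, 25); // miss: returns -(2) - 1 == -3
if (pos < 0) {
  pos = (pos + 1) * -1; // recovers 2, the slot of the first element greater than 25
}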
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
index cd36974..4087437 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
@@ -25,7 +25,7 @@ import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.yetus.audience.InterfaceAudience;
@@ -86,7 +86,7 @@ public abstract class FilterListBase extends FilterBase {
}
protected int compareCell(Cell a, Cell b) {
- int cmp = CellComparatorImpl.COMPARATOR.compare(a, b);
+ int cmp = CellComparator.getInstance().compare(a, b);
return reversed ? -1 * cmp : cmp;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 79c3193..d70c282 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -24,8 +24,7 @@ import java.util.List;
import java.util.PriorityQueue;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -241,8 +240,7 @@ public class FuzzyRowFilter extends FilterBase {
}
boolean lessThan(Cell currentCell, byte[] nextRowKey) {
- int compareResult =
- CellComparatorImpl.COMPARATOR.compareRows(currentCell, nextRowKey, 0, nextRowKey.length);
+ int compareResult = CellComparator.getInstance().compareRows(currentCell, nextRowKey, 0, nextRowKey.length);
return (!isReversed() && compareResult < 0) || (isReversed() && compareResult > 0);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
index 79ebfe5..6e21ba4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
@@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.filter;
import java.util.ArrayList;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
@@ -66,7 +66,7 @@ public class InclusiveStopFilter extends FilterBase {
public boolean filterRowKey(Cell firstRowCell) {
// if stopRowKey is <= buffer, then true, filter row.
if (filterAllRemaining()) return true;
- int cmp = CellComparatorImpl.COMPARATOR.compareRows(firstRowCell, stopRowKey, 0, stopRowKey.length);
+ int cmp = CellComparator.getInstance().compareRows(firstRowCell, stopRowKey, 0, stopRowKey.length);
done = reversed ? cmp < 0 : cmp > 0;
return done;
}
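For orientation, a small sketch of the behavior this comparison drives, with hypothetical row keys: on a forward scan the filter passes rows up to and including the stop row, and sets done once compareRows turns positive (the inequality flips for reversed scans).
Scan scan = new Scan();
scan.setFilter(new InclusiveStopFilter(Bytes.toBytes("row5")));
// Rows sorting <= "row5" are returned; the first row after it stops the scan.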
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index a0f2fa4..dc755f5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -25,9 +25,19 @@ import org.apache.yetus.audience.InterfaceStability;
* Comparator for comparing cells and has some specialized methods that allows comparing individual
* cell components like row, family, qualifier and timestamp
*/
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface CellComparator extends Comparator<Cell> {
+ /**
+ * A comparator for ordering cells in user-space tables. Useful when writing cells in sorted
+ * order as necessary for bulk import (i.e. via MapReduce)
+ * <p>
+ * CAUTION: This comparator may provide inaccurate ordering for cells from system tables,
+ * and should not be relied upon in that case.
+ */
+ static CellComparator getInstance() {
+ return CellComparatorImpl.COMPARATOR;
+ }
/**
* Lexicographically compares two cells. The key part of the cell is taken for comparison which
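With the interface promoted to IA.Public, downstream code can sort user-table cells without touching the IA.Private CellComparatorImpl; a minimal sketch, assuming standard client-side KeyValues:
List<Cell> cells = new ArrayList<>();
cells.add(new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
cells.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
cells.sort(CellComparator.getInstance()); // row1 now precedes row2
// Per the CAUTION above, do not rely on this instance for system-table cells.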
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 8a5bb2c..78f12b5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -1312,7 +1312,7 @@ public final class CellUtil {
}
public static boolean matchingTimestamp(Cell a, Cell b) {
- return CellComparatorImpl.COMPARATOR.compareTimestamps(a.getTimestamp(), b.getTimestamp()) == 0;
+ return CellComparator.getInstance().compareTimestamps(a.getTimestamp(), b.getTimestamp()) == 0;
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 42ac97d..c3a429e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -96,7 +96,7 @@ public class KeyValue implements ExtendedCell {
/**
* Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion
* of KeyValue only.
- * @deprecated Use {@link CellComparatorImpl#COMPARATOR} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
+ * @deprecated Use {@link CellComparator#getInstance()} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
*/
@Deprecated
public static final KVComparator COMPARATOR = new KVComparator();
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
index 76e5c9b..e224046 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
@@ -25,7 +25,6 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HConstants;
@@ -882,7 +881,7 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
qualCommonPrefix);
comp = compareCommonQualifierPrefix(seekCell, keyOnlyKV, qualCommonPrefix);
if (comp == 0) {
- comp = CellComparatorImpl.COMPARATOR.compareTimestamps(seekCell, keyOnlyKV);
+ comp = CellComparator.getInstance().compareTimestamps(seekCell, keyOnlyKV);
if (comp == 0) {
// Compare types. Let the delete types sort ahead of puts;
// i.e. types
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
index a6c9dd6..f25925f 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
@@ -31,7 +31,7 @@ import org.junit.experimental.categories.Category;
@Category({MiscTests.class, SmallTests.class})
public class TestCellComparator {
- private CellComparatorImpl comparator = CellComparatorImpl.COMPARATOR;
+ private CellComparator comparator = CellComparator.getInstance();
byte[] row1 = Bytes.toBytes("row1");
byte[] row2 = Bytes.toBytes("row2");
byte[] row_1_0 = Bytes.toBytes("row10");
@@ -53,7 +53,7 @@ public class TestCellComparator {
kv1 = new KeyValue(row1, fam2, qual1, val);
kv2 = new KeyValue(row1, fam1, qual1, val);
- assertTrue((CellComparatorImpl.COMPARATOR.compareFamilies(kv1, kv2) > 0));
+ assertTrue((comparator.compareFamilies(kv1, kv2) > 0));
kv1 = new KeyValue(row1, fam1, qual1, 1l, val);
kv2 = new KeyValue(row1, fam1, qual1, 2l, val);
@@ -110,16 +110,17 @@ public class TestCellComparator {
kv = new KeyValue(r2, f1, q1, v);
buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell2 = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
+ // compareColumns not on CellComparator so use Impl directly
assertEquals(0, CellComparatorImpl.COMPARATOR.compareColumns(bbCell1, bbCell2));
assertEquals(0, CellComparatorImpl.COMPARATOR.compareColumns(bbCell1, kv));
kv = new KeyValue(r2, f1, q2, v);
buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell3 = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
- assertEquals(0, CellComparatorImpl.COMPARATOR.compareFamilies(bbCell2, bbCell3));
- assertTrue(CellComparatorImpl.COMPARATOR.compareQualifiers(bbCell2, bbCell3) < 0);
+ assertEquals(0, comparator.compareFamilies(bbCell2, bbCell3));
+ assertTrue(comparator.compareQualifiers(bbCell2, bbCell3) < 0);
assertTrue(CellComparatorImpl.COMPARATOR.compareColumns(bbCell2, bbCell3) < 0);
- assertEquals(0, CellComparatorImpl.COMPARATOR.compareRows(bbCell2, bbCell3));
- assertTrue(CellComparatorImpl.COMPARATOR.compareRows(bbCell1, bbCell2) < 0);
+ assertEquals(0, comparator.compareRows(bbCell2, bbCell3));
+ assertTrue(comparator.compareRows(bbCell1, bbCell2) < 0);
}
}
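As the comment in the test notes, compareColumns stays on the IA.Private CellComparatorImpl. Public-API callers can approximate it by comparing family and then qualifier, the same decomposition SyncTable uses later in this patch; a sketch for two cells a and b:
int cmp = CellComparator.getInstance().compareFamilies(a, b);
if (cmp == 0) {
  cmp = CellComparator.getInstance().compareQualifiers(a, b);
}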
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java
index fb9205d..6835c98 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RedundantKVGenerator.java
@@ -26,7 +26,7 @@ import java.util.Random;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.Tag;
@@ -287,7 +287,7 @@ public class RedundantKVGenerator {
}
}
- Collections.sort(result, CellComparatorImpl.COMPARATOR);
+ Collections.sort(result, CellComparator.getInstance());
return result;
}
@@ -383,7 +383,7 @@ public class RedundantKVGenerator {
}
}
- Collections.sort(result, CellComparatorImpl.COMPARATOR);
+ Collections.sort(result, CellComparator.getInstance());
return result;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
index ef26274..887dd8b 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
@@ -84,7 +84,7 @@ public class IntegrationTestImportTsv extends Configured implements Tool {
public TestName name = new TestName();
protected static final Set<KeyValue> simple_expected =
- new TreeSet<KeyValue>(CellComparatorImpl.COMPARATOR) {
+ new TreeSet<KeyValue>(CellComparator.getInstance()) {
private static final long serialVersionUID = 1L;
{
byte[] family = Bytes.toBytes("d");
@@ -160,10 +160,8 @@ public class IntegrationTestImportTsv extends Configured implements Tool {
"Ran out of expected values prematurely!",
expectedIt.hasNext());
KeyValue expected = expectedIt.next();
- assertTrue(
- format("Scan produced surprising result. expected: <%s>, actual: %s",
- expected, actual),
- CellComparatorImpl.COMPARATOR.compare(expected, actual) == 0);
+ assertEquals("Scan produced surprising result", 0,
+ CellComparator.getInstance().compare(expected, actual));
}
}
assertFalse("Did not consume all expected values.", expectedIt.hasNext());
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java
index ed6b219..499accb 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java
@@ -22,7 +22,7 @@ import java.io.IOException;
import java.util.TreeSet;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.MapReduceCell;
@@ -42,7 +42,7 @@ public class CellSortReducer
protected void reduce(ImmutableBytesWritable row, Iterable<Cell> kvs,
Reducer<ImmutableBytesWritable, Cell, ImmutableBytesWritable, Cell>.Context context)
throws java.io.IOException, InterruptedException {
- TreeSet<Cell> map = new TreeSet<>(CellComparatorImpl.COMPARATOR);
+ TreeSet<Cell> map = new TreeSet<>(CellComparator.getInstance());
for (Cell kv : kvs) {
try {
map.add(PrivateCellUtil.deepClone(kv));
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 162939e..d7606fc 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
@@ -402,12 +402,12 @@ public class HFileOutputFormat2
wl.writer =
new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs)
.withOutputDir(familydir).withBloomType(bloomType)
- .withComparator(CellComparatorImpl.COMPARATOR).withFileContext(hFileContext).build();
+ .withComparator(CellComparator.getInstance()).withFileContext(hFileContext).build();
} else {
wl.writer =
new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))
.withOutputDir(familydir).withBloomType(bloomType)
- .withComparator(CellComparatorImpl.COMPARATOR).withFileContext(hFileContext)
+ .withComparator(CellComparator.getInstance()).withFileContext(hFileContext)
.withFavoredNodes(favoredNodes).build();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index f857d4b..d25d497 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -145,10 +145,8 @@ public class Import extends Configured implements Tool {
}
@Override
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
- justification="This is wrong, yes, but we should be purging Writables, not fixing them")
public int compareTo(CellWritableComparable o) {
- return CellComparatorImpl.COMPARATOR.compare(this.kv, ((CellWritableComparable)o).kv);
+ return CellComparator.getInstance().compare(this.kv, o.kv);
}
public static class CellWritableComparator extends WritableComparator {
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
index 6c36302..f4ad1f2 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
@@ -27,7 +27,7 @@ import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.Tag;
@@ -77,7 +77,7 @@ public class PutSortReducer extends
"putsortreducer.row.threshold", 1L * (1<<30));
Iterator<Put> iter = puts.iterator();
while (iter.hasNext()) {
- TreeSet<KeyValue> map = new TreeSet<>(CellComparatorImpl.COMPARATOR);
+ TreeSet<KeyValue> map = new TreeSet<>(CellComparator.getInstance());
long curSize = 0;
// stop at the end or the RAM threshold
List<Tag> tags = new ArrayList<>();
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index 3f5cc69..edef842 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
@@ -587,18 +587,18 @@ public class SyncTable extends Configured implements Tool {
return -1; // target missing cell
}
- int result = CellComparatorImpl.COMPARATOR.compareFamilies(c1, c2);
+ int result = CellComparator.getInstance().compareFamilies(c1, c2);
if (result != 0) {
return result;
}
- result = CellComparatorImpl.COMPARATOR.compareQualifiers(c1, c2);
+ result = CellComparator.getInstance().compareQualifiers(c1, c2);
if (result != 0) {
return result;
}
// note timestamp comparison is inverted - more recent cells first
- return CellComparatorImpl.COMPARATOR.compareTimestamps(c1, c2);
+ return CellComparator.getInstance().compareTimestamps(c1, c2);
}
@Override
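The inverted timestamp comparison keeps newer cells first, matching HBase's native newest-version-first ordering within a column. A small sketch (row, family, and qualifier bytes hypothetical):
Cell newer = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), 200L, Bytes.toBytes("v"));
Cell older = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), 100L, Bytes.toBytes("v"));
// compareTimestamps sorts the larger (more recent) timestamp first:
assert CellComparator.getInstance().compareTimestamps(newer, older) < 0;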
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
index 0f47032..493a7c4 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
@@ -27,7 +27,7 @@ import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.Tag;
@@ -144,7 +144,7 @@ public class TextSortReducer extends
"reducer.row.threshold", 1L * (1<<30));
Iterator<Text> iter = lines.iterator();
while (iter.hasNext()) {
- Set<KeyValue> kvs = new TreeSet<>(CellComparatorImpl.COMPARATOR);
+ Set<KeyValue> kvs = new TreeSet<>(CellComparator.getInstance());
long curSize = 0;
// stop at the end or the RAM threshold
while (iter.hasNext() && curSize < threshold) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 115302e..672919d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -110,7 +110,7 @@ public class FixedFileTrailer {
/** Raw key comparator class name in version 3 */
// We could write the actual class name from 2.0 onwards and handle BC
- private String comparatorClassName = CellComparatorImpl.COMPARATOR.getClass().getName();
+ private String comparatorClassName = CellComparator.getInstance().getClass().getName();
/** The encryption key */
private byte[] encryptionKey;
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 53ab9eb..d63c120 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -276,8 +275,7 @@ public class HFile {
protected FileSystem fs;
protected Path path;
protected FSDataOutputStream ostream;
- protected CellComparator comparator =
- CellComparatorImpl.COMPARATOR;
+ protected CellComparator comparator = CellComparator.getInstance();
protected InetSocketAddress[] favoredNodes;
private HFileContext fileContext;
protected boolean shouldDropBehind = false;
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index 1087465..990ac5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -53,7 +53,7 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -380,7 +380,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
do {
Cell cell = scanner.getCell();
if (row != null && row.length != 0) {
- int result = CellComparatorImpl.COMPARATOR.compareRows(cell, row, 0, row.length);
+ int result = CellComparator.getInstance().compareRows(cell, row, 0, row.length);
if (result > 0) {
break;
} else if (result < 0) {
@@ -409,7 +409,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
}
// check if rows are in order
if (checkRow && pCell != null) {
- if (CellComparatorImpl.COMPARATOR.compareRows(pCell, cell) > 0) {
+ if (CellComparator.getInstance().compareRows(pCell, cell) > 0) {
err.println("WARNING, previous row is greater then"
+ " current row\n\tfilename -> " + file + "\n\tprevious -> "
+ CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> "
@@ -425,7 +425,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
+ "\n\tfilename -> " + file + "\n\tkeyvalue -> "
+ CellUtil.getCellKeyAsString(cell));
}
- if (pCell != null && CellComparatorImpl.COMPARATOR.compareFamilies(pCell, cell) != 0) {
+ if (pCell != null && CellComparator.getInstance().compareFamilies(pCell, cell) != 0) {
err.println("WARNING, previous kv has different family"
+ " compared to current key\n\tfilename -> " + file
+ "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell)
@@ -619,7 +619,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
public void collect(Cell cell) {
valLen.update(cell.getValueLength());
if (prevCell != null &&
- CellComparatorImpl.COMPARATOR.compareRows(prevCell, cell) != 0) {
+ CellComparator.getInstance().compareRows(prevCell, cell) != 0) {
// new row
collectRow();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index f2416bc..9e29023 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -106,7 +105,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
private int avgValueLen = -1;
/** Key comparator */
- private CellComparator comparator = CellComparatorImpl.COMPARATOR;
+ private CellComparator comparator = CellComparator.getInstance();
/** Size of this file. */
private final long fileSize;
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index e82b2bb..8c631eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -178,7 +177,7 @@ public class HFileWriterImpl implements HFile.Writer {
} else {
this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
}
- this.comparator = comparator != null? comparator: CellComparatorImpl.COMPARATOR;
+ this.comparator = comparator != null ? comparator : CellComparator.getInstance();
closeOutputStream = path != null;
this.cacheConf = cacheConf;
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 1d9c10c..8407783 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -42,8 +42,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
@@ -682,7 +681,7 @@ public final class MobUtils {
StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, fs)
.withFilePath(path)
- .withComparator(CellComparatorImpl.COMPARATOR).withBloomType(bloomType)
+ .withComparator(CellComparator.getInstance()).withBloomType(bloomType)
.withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build();
return w;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index 92c7cef..3064723 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
@@ -812,7 +813,7 @@ public class PartitionedMobCompactor extends MobCompactor {
List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact,
false, true, false, false, HConstants.LATEST_TIMESTAMP);
long ttl = HStore.determineTTLFromFamily(column);
- ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparatorImpl.COMPARATOR);
+ ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparator.getInstance());
return new StoreScanner(scanInfo, scanType, scanners);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index b1a87be..0e0276a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@@ -64,7 +63,7 @@ public class DefaultMemStore extends AbstractMemStore {
* Default constructor. Used for tests.
*/
public DefaultMemStore() {
- this(HBaseConfiguration.create(), CellComparatorImpl.COMPARATOR);
+ this(HBaseConfiguration.create(), CellComparator.getInstance());
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index db900a1..2b23598 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.HConstants;
@@ -778,7 +777,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
+ CellUtil.getCellKeyAsString(prevCell) + " current="
+ CellUtil.getCellKeyAsString(cell));
}
- if (CellComparatorImpl.COMPARATOR.compareFamilies(prevCell, cell) != 0) {
+ if (CellComparator.getInstance().compareFamilies(prevCell, cell) != 0) {
throw new InvalidHFileException("Previous key had different"
+ " family compared to current key: path=" + srcPath
+ " previous="
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 2e74ecf..a9d9292 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -372,7 +371,7 @@ public class StoreFileReader {
if (bloomFilterType == BloomType.ROW) {
keyIsAfterLast = (Bytes.BYTES_RAWCOMPARATOR.compare(key, lastBloomKey) > 0);
} else {
- keyIsAfterLast = (CellComparatorImpl.COMPARATOR.compare(kvKey, lastBloomKeyOnlyKV)) > 0;
+ keyIsAfterLast = (CellComparator.getInstance().compare(kvKey, lastBloomKeyOnlyKV)) > 0;
}
}
@@ -385,7 +384,7 @@ public class StoreFileReader {
// hbase:meta does not have blooms. So we need not have special interpretation
// of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom
if (keyIsAfterLast
- && (CellComparatorImpl.COMPARATOR.compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) {
+ && (CellComparator.getInstance().compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) {
exists = false;
} else {
exists =
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
index 142e3c8..26977e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
@@ -37,8 +37,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.KeyValue;
@@ -348,7 +346,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
private final CacheConfig cacheConf;
private final FileSystem fs;
- private CellComparator comparator = CellComparatorImpl.COMPARATOR;
+ private CellComparator comparator = CellComparator.getInstance();
private BloomType bloomType = BloomType.NONE;
private long maxKeyCount = 0;
private Path dir;
@@ -473,7 +471,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
}
if (comparator == null) {
- comparator = CellComparatorImpl.COMPARATOR;
+ comparator = CellComparator.getInstance();
}
return new StoreFileWriter(fs, filePath,
conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext,
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 88092f1..03ef008 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -26,7 +26,7 @@ import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -78,11 +78,11 @@ class FSWALEntry extends Entry {
@VisibleForTesting
static Set<byte[]> collectFamilies(List<Cell> cells) {
if (CollectionUtils.isEmpty(cells)) {
- return Collections.<byte[]> emptySet();
+ return Collections.emptySet();
} else {
return cells.stream()
.filter(v -> !CellUtil.matchingFamily(v, WALEdit.METAFAMILY))
- .collect(toCollection(() -> new TreeSet<>(CellComparatorImpl.COMPARATOR::compareFamilies)))
+ .collect(toCollection(() -> new TreeSet<>(CellComparator.getInstance()::compareFamilies)))
.stream()
.map(CellUtil::cloneFamily)
.collect(toCollection(() -> new TreeSet<>(Bytes.BYTES_COMPARATOR)));
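The two-stage collect above first dedups whole cells by family (a TreeSet keyed on compareFamilies), then clones each family into its own byte[] so the returned set does not pin the cells' backing buffers. A sketch of the same idea on three hypothetical cells, assuming Collectors.toCollection is statically imported as in the original:
List<Cell> cells = Arrays.asList(
    new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("v")),
    new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f1"), Bytes.toBytes("q2"), Bytes.toBytes("v")),
    new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f2"), Bytes.toBytes("q1"), Bytes.toBytes("v")));
Set<byte[]> families = cells.stream()
    .collect(toCollection(() -> new TreeSet<>(CellComparator.getInstance()::compareFamilies)))
    .stream()
    .map(CellUtil::cloneFamily)
    .collect(toCollection(() -> new TreeSet<>(Bytes.BYTES_COMPARATOR)));
// families now holds two entries, the raw bytes of "f1" and "f2".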
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java
index d04ef3d..d8b218c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java
@@ -27,7 +27,6 @@ import java.util.SortedSet;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner;
/**
@@ -41,7 +40,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner {
private Cell current;
public CollectionBackedScanner(SortedSet<Cell> set) {
- this(set, CellComparatorImpl.COMPARATOR);
+ this(set, CellComparator.getInstance());
}
public CollectionBackedScanner(SortedSet<Cell> set,
@@ -52,7 +51,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner {
}
public CollectionBackedScanner(List<Cell> list) {
- this(list, CellComparatorImpl.COMPARATOR);
+ this(list, CellComparator.getInstance());
}
public CollectionBackedScanner(List<Cell> list,
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
index 2f715df..dbc7afa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
@@ -24,6 +24,7 @@ import java.util.Locale;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -140,7 +141,7 @@ public class CompressionTest {
scanner.seekTo(); // position to the start of file
// Scanner does not do Cells yet. Do below for now till fixed.
cc = scanner.getCell();
- if (CellComparatorImpl.COMPARATOR.compareRows(c, cc) != 0) {
+ if (CellComparator.getInstance().compareRows(c, cc) != 0) {
throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
}
} finally {
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index dce04bd..0a1c60f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -3538,7 +3538,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
int i;
for (i = 0; i < minLen
- && CellComparatorImpl.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
+ && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0;
++i) {}
if (additionalMsg == null) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
index 5992776..b8a86c6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
@@ -366,7 +366,7 @@ public class HFilePerformanceEvaluation {
writer = HFile.getWriterFactoryNoCache(conf)
.withPath(fs, mf)
.withFileContext(hFileContext)
- .withComparator(CellComparatorImpl.COMPARATOR)
+ .withComparator(CellComparator.getInstance())
.create();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
index 86790af..e87602e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
@@ -32,7 +32,7 @@ import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
@@ -71,7 +71,7 @@ public class TestResult extends TestCase {
*/
public void testResultAsCellScanner() throws IOException {
Cell [] cells = genKVs(row, family, value, 1, 10);
- Arrays.sort(cells, CellComparatorImpl.COMPARATOR);
+ Arrays.sort(cells, CellComparator.getInstance());
Result r = Result.create(cells);
assertSame(r, cells);
// Assert I run over same result multiple times.
@@ -93,7 +93,7 @@ public class TestResult extends TestCase {
public void testBasicGetColumn() throws Exception {
KeyValue [] kvs = genKVs(row, family, value, 1, 100);
- Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
+ Arrays.sort(kvs, CellComparator.getInstance());
Result r = Result.create(kvs);
@@ -132,7 +132,7 @@ public class TestResult extends TestCase {
System.arraycopy(kvs1, 0, kvs, 0, kvs1.length);
System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length);
- Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
+ Arrays.sort(kvs, CellComparator.getInstance());
Result r = Result.create(kvs);
for (int i = 0; i < 100; ++i) {
@@ -149,7 +149,7 @@ public class TestResult extends TestCase {
public void testBasicGetValue() throws Exception {
KeyValue [] kvs = genKVs(row, family, value, 1, 100);
- Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
+ Arrays.sort(kvs, CellComparator.getInstance());
Result r = Result.create(kvs);
@@ -169,7 +169,7 @@ public class TestResult extends TestCase {
System.arraycopy(kvs1, 0, kvs, 0, kvs1.length);
System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length);
- Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
+ Arrays.sort(kvs, CellComparator.getInstance());
Result r = Result.create(kvs);
for (int i = 0; i < 100; ++i) {
@@ -183,7 +183,7 @@ public class TestResult extends TestCase {
public void testBasicLoadValue() throws Exception {
KeyValue [] kvs = genKVs(row, family, value, 1, 100);
- Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
+ Arrays.sort(kvs, CellComparator.getInstance());
Result r = Result.create(kvs);
ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024);
@@ -208,7 +208,7 @@ public class TestResult extends TestCase {
System.arraycopy(kvs1, 0, kvs, 0, kvs1.length);
System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length);
- Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
+ Arrays.sort(kvs, CellComparator.getInstance());
ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024);
@@ -291,7 +291,7 @@ public class TestResult extends TestCase {
KeyValue [] kvs = genKVs(Bytes.toBytes(rowSB.toString()), family,
Bytes.toBytes(valueSB.toString()), 1, n);
- Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
+ Arrays.sort(kvs, CellComparator.getInstance());
ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024);
Result r = Result.create(kvs);
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
index 3779b32..637720a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
@@ -31,7 +31,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -45,11 +45,9 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.testclassification.FilterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -1666,8 +1664,7 @@ public class TestFilter {
int i = 0;
for (boolean done = true; done; i++) {
done = scanner.next(results);
- Arrays.sort(results.toArray(new Cell[results.size()]),
- CellComparatorImpl.COMPARATOR);
+ Arrays.sort(results.toArray(new Cell[results.size()]), CellComparator.getInstance());
LOG.info("counter=" + i + ", " + results);
if (results.isEmpty()) break;
assertTrue("Scanned too many rows! Only expected " + expectedRows +
@@ -1689,7 +1686,7 @@ public class TestFilter {
for (boolean done = true; done; i++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new Cell[results.size()]),
- CellComparatorImpl.COMPARATOR);
+ CellComparator.getInstance());
LOG.info("counter=" + i + ", " + results);
if(results.isEmpty()) break;
assertTrue("Scanned too many rows! Only expected " + expectedRows +
@@ -1711,7 +1708,7 @@ public class TestFilter {
for (boolean done = true; done; row++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new Cell[results.size()]),
- CellComparatorImpl.COMPARATOR);
+ CellComparator.getInstance());
if(results.isEmpty()) break;
assertTrue("Scanned too many keys! Only expected " + kvs.length +
" total but already scanned " + (results.size() + idx) +
@@ -1742,7 +1739,7 @@ public class TestFilter {
for (boolean more = true; more; row++) {
more = scanner.next(results);
Arrays.sort(results.toArray(new Cell[results.size()]),
- CellComparatorImpl.COMPARATOR);
+ CellComparator.getInstance());
if(results.isEmpty()) break;
assertTrue("Scanned too many keys! Only expected " + kvs.length +
" total but already scanned " + (results.size() + idx) +
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
index 4774767..16a57fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
@@ -32,7 +32,7 @@ import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -493,13 +493,13 @@ public class TestFilterList {
public byte [] toByteArray() {return null;}
};
+ CellComparator comparator = CellComparator.getInstance();
// MUST PASS ONE
// Should take the min if given two hints
FilterList filterList = new FilterList(Operator.MUST_PASS_ONE,
Arrays.asList(new Filter [] { filterMinHint, filterMaxHint } ));
- assertEquals(0, CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null),
- minKeyValue));
+ assertEquals(0, comparator.compare(filterList.getNextCellHint(null), minKeyValue));
// Should have no hint if any filter has no hint
filterList = new FilterList(Operator.MUST_PASS_ONE,
@@ -513,8 +513,7 @@ public class TestFilterList {
// Should give max hint if its the only one
filterList = new FilterList(Operator.MUST_PASS_ONE,
Arrays.asList(new Filter[] { filterMaxHint, filterMaxHint }));
- assertEquals(0,
- CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
+ assertEquals(0, comparator.compare(filterList.getNextCellHint(null), maxKeyValue));
// MUST PASS ALL
@@ -522,31 +521,26 @@ public class TestFilterList {
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter [] { filterMinHint, filterMaxHint } ));
filterList.filterCell(null);
- assertEquals(0,
- CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
+ assertEquals(0, comparator.compare(filterList.getNextCellHint(null), maxKeyValue));
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter [] { filterMaxHint, filterMinHint } ));
filterList.filterCell(null);
- assertEquals(0,
- CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
+ assertEquals(0, comparator.compare(filterList.getNextCellHint(null), maxKeyValue));
// Should have first hint even if a filter has no hint
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter[] { filterNoHint, filterMinHint, filterMaxHint }));
filterList.filterCell(null);
- assertEquals(0,
- CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
+ assertEquals(0, comparator.compare(filterList.getNextCellHint(null), maxKeyValue));
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter[] { filterNoHint, filterMaxHint }));
filterList.filterCell(null);
- assertEquals(0,
- CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
+ assertEquals(0, comparator.compare(filterList.getNextCellHint(null), maxKeyValue));
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter[] { filterNoHint, filterMinHint }));
filterList.filterCell(null);
- assertEquals(0,
- CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), minKeyValue));
+ assertEquals(0, comparator.compare(filterList.getNextCellHint(null), minKeyValue));
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
index 2a54cb1..f15e432 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
@@ -23,7 +23,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.CollectionBackedScanner;
@@ -34,14 +34,14 @@ import org.apache.hadoop.hbase.util.CollectionBackedScanner;
* to be a store file scanner.
*/
public class KeyValueScanFixture extends CollectionBackedScanner {
- public KeyValueScanFixture(CellComparatorImpl comparator, Cell... cells) {
+ public KeyValueScanFixture(CellComparator comparator, Cell... cells) {
super(comparator, cells);
}
public static List<KeyValueScanner> scanFixture(KeyValue[] ... kvArrays) {
ArrayList<KeyValueScanner> scanners = new ArrayList<>();
for (KeyValue [] kvs : kvArrays) {
- scanners.add(new KeyValueScanFixture(CellComparatorImpl.COMPARATOR, kvs));
+ scanners.add(new KeyValueScanFixture(CellComparator.getInstance(), kvs));
}
return scanners;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java
index f2a4220..0717b4d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java
@@ -29,7 +29,7 @@ import java.util.SortedSet;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -113,9 +113,9 @@ public class TestCellFlatSet extends TestCase {
lowerOuterCell = new KeyValue(Bytes.toBytes(10), f, q, 10, v);
upperOuterCell = new KeyValue(Bytes.toBytes(50), f, q, 10, v);
ascCells = new Cell[] {kv1,kv2,kv3,kv4};
- ascCbOnHeap = new CellArrayMap(CellComparatorImpl.COMPARATOR,ascCells,0,NUM_OF_CELLS,false);
+ ascCbOnHeap = new CellArrayMap(CellComparator.getInstance(), ascCells,0, NUM_OF_CELLS,false);
descCells = new Cell[] {kv4,kv3,kv2,kv1};
- descCbOnHeap = new CellArrayMap(CellComparatorImpl.COMPARATOR,descCells,0,NUM_OF_CELLS,true);
+ descCbOnHeap = new CellArrayMap(CellComparator.getInstance(), descCells,0, NUM_OF_CELLS,true);
CONF.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
CONF.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
@@ -318,7 +318,7 @@ public class TestCellFlatSet extends TestCase {
idxOffset = ByteBufferUtils.putLong(idxBuffer, idxOffset, kv.getSequenceId()); // seqId
}
- return new CellChunkMap(CellComparatorImpl.COMPARATOR,chunkArray,0,NUM_OF_CELLS,!asc);
+ return new CellChunkMap(CellComparator.getInstance(),chunkArray,0,NUM_OF_CELLS,!asc);
}
/* Create CellChunkMap with four cells inside the data jumbo chunk. This test is working only
@@ -367,6 +367,6 @@ public class TestCellFlatSet extends TestCase {
dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER;
}
- return new CellChunkMap(CellComparatorImpl.COMPARATOR,chunkArray,0,NUM_OF_CELLS,!asc);
+ return new CellChunkMap(CellComparator.getInstance(),chunkArray,0,NUM_OF_CELLS,!asc);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index 0d5290d..0f18dee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -28,7 +28,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -88,8 +87,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
@Before
public void setUp() throws Exception {
compactingSetUp();
- this.memstore = new MyCompactingMemStore(HBaseConfiguration.create(), CellComparatorImpl
- .COMPARATOR,
+ this.memstore = new MyCompactingMemStore(HBaseConfiguration.create(), CellComparator.getInstance(),
store, regionServicesForStores, MemoryCompactionPolicy.EAGER);
}
@@ -149,7 +147,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
// use case 3: first in snapshot second in kvset
this.memstore = new CompactingMemStore(HBaseConfiguration.create(),
- CellComparatorImpl.COMPARATOR, store, regionServicesForStores,
+ CellComparator.getInstance(), store, regionServicesForStores,
MemoryCompactionPolicy.EAGER);
this.memstore.add(kv1.clone(), null);
// As compaction is starting in the background the repetition
@@ -192,7 +190,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
Thread.sleep(1);
addRows(this.memstore);
Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY);
- assertTrue(CellComparatorImpl.COMPARATOR.compareRows(closestToEmpty,
+ assertTrue(CellComparator.getInstance().compareRows(closestToEmpty,
new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
for (int i = 0; i < ROW_COUNT; i++) {
Cell nr = ((CompactingMemStore)this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i),
@@ -200,7 +198,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
if (i + 1 == ROW_COUNT) {
assertEquals(nr, null);
} else {
- assertTrue(CellComparatorImpl.COMPARATOR.compareRows(nr,
+ assertTrue(CellComparator.getInstance().compareRows(nr,
new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0);
}
}
@@ -218,7 +216,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
Cell left = results.get(0);
byte[] row1 = Bytes.toBytes(rowId);
assertTrue("Row name",
- CellComparatorImpl.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0);
+ CellComparator.getInstance().compareRows(left, row1, 0, row1.length) == 0);
assertEquals("Count of columns", QUALIFIER_COUNT, results.size());
List<Cell> row = new ArrayList<>();
for (Cell kv : results) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java
index 296cf0c..0c071a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java
@@ -24,7 +24,7 @@ import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueTestUtil;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -46,7 +46,7 @@ public class TestKeyValueScanFixture extends TestCase {
KeyValueTestUtil.create("RowB", "family", "qf1",
10, KeyValue.Type.Put, "value-10")
};
- KeyValueScanner scan = new KeyValueScanFixture(CellComparatorImpl.COMPARATOR, kvs);
+ KeyValueScanner scan = new KeyValueScanFixture(CellComparator.getInstance(), kvs);
KeyValue kv = KeyValueUtil.createFirstOnRow(Bytes.toBytes("RowA"));
// should seek to this:
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index f9e4ea9..f4758e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
@@ -76,7 +75,7 @@ public class TestStoreScanner {
private static final byte[] CF = Bytes.toBytes(CF_STR);
static Configuration CONF = HBaseConfiguration.create();
private ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
- KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);
+ KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
/**
* From here on down, we have a bunch of defines and specific CELL_GRID of Cells. The
@@ -162,7 +161,7 @@ public class TestStoreScanner {
CellGridStoreScanner(final Scan scan, ScanInfo scanInfo) throws IOException {
super(scan, scanInfo, scan.getFamilyMap().get(CF), Arrays.<KeyValueScanner> asList(
- new KeyValueScanner[] { new KeyValueScanFixture(CellComparatorImpl.COMPARATOR, CELL_GRID) }));
+ new KeyValueScanner[] { new KeyValueScanFixture(CellComparator.getInstance(), CELL_GRID) }));
}
protected void resetKVHeap(List<? extends KeyValueScanner> scanners,
@@ -225,7 +224,7 @@ public class TestStoreScanner {
CellWithVersionsStoreScanner(final Scan scan, ScanInfo scanInfo) throws IOException {
super(scan, scanInfo, scan.getFamilyMap().get(CF),
Arrays.<KeyValueScanner> asList(new KeyValueScanner[] {
- new KeyValueScanFixture(CellComparatorImpl.COMPARATOR, CELL_WITH_VERSIONS) }));
+ new KeyValueScanFixture(CellComparator.getInstance(), CELL_WITH_VERSIONS) }));
}
protected boolean trySkipToNextColumn(Cell cell) throws IOException {
@@ -253,7 +252,7 @@ public class TestStoreScanner {
CellWithVersionsNoOptimizeStoreScanner(Scan scan, ScanInfo scanInfo) throws IOException {
super(scan, scanInfo, scan.getFamilyMap().get(CF),
Arrays.<KeyValueScanner> asList(new KeyValueScanner[] {
- new KeyValueScanFixture(CellComparatorImpl.COMPARATOR, CELL_WITH_VERSIONS) }));
+ new KeyValueScanFixture(CellComparator.getInstance(), CELL_WITH_VERSIONS) }));
}
protected boolean trySkipToNextColumn(Cell cell) throws IOException {
@@ -457,7 +456,7 @@ public class TestStoreScanner {
};
List<KeyValueScanner> scanners = Arrays.<KeyValueScanner>asList(
new KeyValueScanner[] {
- new KeyValueScanFixture(CellComparatorImpl.COMPARATOR, kvs)
+ new KeyValueScanFixture(CellComparator.getInstance(), kvs)
});
Scan scanSpec = new Scan().withStartRow(Bytes.toBytes(r1));
scanSpec.setTimeRange(0, 6);
@@ -508,7 +507,7 @@ public class TestStoreScanner {
};
List<KeyValueScanner> scanners = Arrays.asList(
new KeyValueScanner[] {
- new KeyValueScanFixture(CellComparatorImpl.COMPARATOR, kvs)
+ new KeyValueScanFixture(CellComparator.getInstance(), kvs)
});
Scan scanSpec = new Scan().withStartRow(Bytes.toBytes("R1"));
@@ -804,7 +803,7 @@ public class TestStoreScanner {
Scan scan = new Scan();
scan.readVersions(1);
ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);
+ HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
List<Cell> results = new ArrayList<>();
assertEquals(true, scanner.next(results));
@@ -868,7 +867,7 @@ public class TestStoreScanner {
scan.readVersions(1);
// scanner with ttl equal to 500
ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);
+ HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
List<Cell> results = new ArrayList<>();
assertEquals(true, scanner.next(results));
@@ -930,7 +929,7 @@ public class TestStoreScanner {
KeepDeletedCells.FALSE /* keepDeletedCells */,
HConstants.DEFAULT_BLOCKSIZE /* block size */,
200, /* timeToPurgeDeletes */
- CellComparatorImpl.COMPARATOR, false);
+ CellComparator.getInstance(), false);
try (StoreScanner scanner =
new StoreScanner(scanInfo, OptionalInt.of(2), ScanType.COMPACT_DROP_DELETES, scanners)) {
List<Cell> results = new ArrayList<>();
@@ -959,7 +958,7 @@ public class TestStoreScanner {
create("R1", "cf", "a", now - 10, KeyValue.Type.Put, "dont-care"), };
List<KeyValueScanner> scanners = scanFixture(kvs);
ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);
+ HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
try (StoreScanner storeScanner = new StoreScanner(scanInfo, OptionalInt.empty(),
ScanType.COMPACT_RETAIN_DELETES, scanners)) {
assertFalse(storeScanner.isScanUsePread());
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java
index 049ee74..af63de9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.regionserver.querymatcher;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Scan;
@@ -45,7 +45,7 @@ public class AbstractTestScanQueryMatcher {
protected Get get;
protected long ttl = Long.MAX_VALUE;
- protected CellComparatorImpl rowComparator;
+ protected CellComparator rowComparator;
protected Scan scan;
@Before
@@ -72,6 +72,6 @@ public class AbstractTestScanQueryMatcher {
get.addColumn(fam2, col5);
this.scan = new Scan(get);
- rowComparator = CellComparatorImpl.COMPARATOR;
+ rowComparator = CellComparator.getInstance();
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ee8e271/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
index 0c51b28..eb0d683 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
@@ -917,7 +917,7 @@ class HBaseContext(@transient sc: SparkContext,
new WriterLength(0,
new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))
.withBloomType(BloomType.valueOf(familyOptions.bloomType))
- .withComparator(CellComparatorImpl.COMPARATOR).withFileContext(hFileContext)
+ .withComparator(CellComparator.getInstance()).withFileContext(hFileContext)
.withFilePath(new Path(familydir, "_" + UUID.randomUUID.toString.replaceAll("-", "")))
.withFavoredNodes(favoredNodes).build())
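Taken together, the hunks above make one mechanical substitution: every call site that
referenced the concrete CellComparatorImpl.COMPARATOR singleton now goes through the
CellComparator interface instead. A minimal before/after sketch of the pattern, assuming
the HBase 2.x client API (hbase-client on the classpath), where CellComparator.getInstance()
returns the default comparator:

import java.util.Arrays;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;

public class ComparatorMigrationSketch {
  // Before: callers were coupled to the implementation class.
  //   Arrays.sort(cells, CellComparatorImpl.COMPARATOR);

  // After: callers depend only on the public CellComparator interface;
  // getInstance() hands back the default comparator.
  static void sortCells(Cell[] cells) {
    Arrays.sort(cells, CellComparator.getInstance());
  }
}

The interface-based form keeps CellComparatorImpl out of the public API surface, so the
default comparator can change without touching call sites.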
[02/15] hbase git commit: Change timezone for Ashu Pachauri in pom.xml
Posted by bu...@apache.org.
Change timezone for Ashu Pachauri in pom.xml
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc3f3ee3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc3f3ee3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc3f3ee3
Branch: refs/heads/HBASE-19189
Commit: bc3f3ee3bc43b3c14d61806f799382c9c06a49d6
Parents: c463e9c
Author: Ashu Pachauri <as...@apache.org>
Authored: Mon Nov 6 12:17:41 2017 +0530
Committer: Ashu Pachauri <as...@apache.org>
Committed: Mon Nov 6 12:17:41 2017 +0530
----------------------------------------------------------------------
pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/bc3f3ee3/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 8117583..ffc25fa 100755
--- a/pom.xml
+++ b/pom.xml
@@ -215,7 +215,7 @@
<id>ashu</id>
<name>Ashu Pachauri</name>
<email>ashu@apache.org</email>
- <timezone>-8</timezone>
+ <timezone>+5</timezone>
</developer>
<developer>
<id>binlijin</id>
[14/15] hbase git commit: HBASE-18961 doMiniBatchMutate() is split
into smaller member methods of BatchOperation and its sub-classes
Posted by bu...@apache.org.
HBASE-18961 doMiniBatchMutate() is split into smaller member methods of BatchOperation and its sub-classes
There are no functionality changes except for the following:
* The variable lastIndexExclusive was incremented while locking rows corresponding to input
operations. As a result, when getRowLockInternal() threw a TimeoutIOException, only the
operations in the range [nextIndexToProcess, lastIndexExclusive) were marked as FAILED before
the exception was raised up the call stack. With these changes, all operations are marked as FAILED.
* The cluster ids of the first mutation are now used consistently for the entire batch. Previously,
the cluster ids of the first mutation in each mini-batch were used.
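One note before the diff itself: the refactoring repeatedly replaces near-identical
"for each pending operation" loops with the new BatchOperation.Visitor callback shown
below. A self-contained sketch of that idiom, with hypothetical stand-in fields (the
pending[] array and the driver main() are illustrative, not HBase code; only the
visitBatchOperations()/Visitor shape mirrors the diff):

import java.io.IOException;

public class VisitorSketch {
  // Mirrors BatchOperation.Visitor from the diff below.
  @FunctionalInterface
  interface Visitor {
    // Return false to stop visiting the remaining entries.
    boolean visit(int index) throws IOException;
  }

  // Hypothetical state standing in for the retCodeDetails/NOT_RUN bookkeeping.
  private final boolean[] pending = {true, false, true, true};
  private int nextIndexToProcess = 0;

  // Walks operations in [nextIndexToProcess, lastIndexExclusive), optionally
  // skipping the ones that already ran, as the new helper method does.
  void visitBatchOperations(boolean pendingOnly, int lastIndexExclusive, Visitor visitor)
      throws IOException {
    for (int i = nextIndexToProcess; i < lastIndexExclusive; i++) {
      if (!pendingOnly || pending[i]) {
        if (!visitor.visit(i)) {
          break;
        }
      }
    }
  }

  public static void main(String[] args) throws IOException {
    VisitorSketch batch = new VisitorSketch();
    int[] visited = {0};  // single-element array so the lambda can mutate it
    batch.visitBatchOperations(true, batch.pending.length, index -> {
      visited[0]++;
      return true;
    });
    System.out.println("pending operations visited: " + visited[0]);  // prints 3
  }
}

Passing a lambda per phase (check-and-prepare, WAL-edit building, memstore write,
post-op hooks) is what lets doMiniBatchMutate() shrink into the smaller member
methods this commit introduces.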
Signed-off-by: Michael Stack <st...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4eae5a29
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4eae5a29
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4eae5a29
Branch: refs/heads/HBASE-19189
Commit: 4eae5a29749da1c34f1a2dd0b1f6aa6f7a9bbffd
Parents: 29fd1de
Author: Umesh Agashe <ua...@cloudera.com>
Authored: Sun Oct 8 00:31:12 2017 -0700
Committer: Michael Stack <st...@apache.org>
Committed: Tue Nov 7 10:00:49 2017 -0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/util/NonceKey.java | 4 +-
.../hadoop/hbase/regionserver/HRegion.java | 1328 ++++++++++--------
.../MiniBatchOperationInProgress.java | 44 +-
.../regionserver/MultiRowMutationProcessor.java | 2 +-
.../TestMiniBatchOperationInProgress.java | 4 +-
.../access/TestWithDisabledAuthorization.java | 2 +-
6 files changed, 805 insertions(+), 579 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/4eae5a29/hbase-common/src/main/java/org/apache/hadoop/hbase/util/NonceKey.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/NonceKey.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/NonceKey.java
index 6da808e..b658331 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/NonceKey.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/NonceKey.java
@@ -1,4 +1,4 @@
-/**
+/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -18,7 +18,6 @@
*/
package org.apache.hadoop.hbase.util;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -31,7 +30,6 @@ public class NonceKey {
private long nonce;
public NonceKey(long group, long nonce) {
- assert nonce != HConstants.NO_NONCE;
this.group = group;
this.nonce = nonce;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/4eae5a29/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 5cd27b8..82d4bd2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -161,6 +161,7 @@ import org.apache.hadoop.hbase.util.EncryptionTest;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HashedBytes;
+import org.apache.hadoop.hbase.util.NonceKey;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.Threads;
@@ -199,6 +200,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDe
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
+import edu.umd.cs.findbugs.annotations.Nullable;
+
/**
* Regions store data for a certain region of a table. It stores all columns
* for each row. A given table consists of one or more Regions.
@@ -642,7 +645,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// flushPerChanges is to prevent too many changes in memstore
private long flushPerChanges;
private long blockingMemStoreSize;
- final long threadWakeFrequency;
// Used to guard closes
final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
@@ -757,7 +759,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
}
this.rsServices = rsServices;
- this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
setHTableSpecificConf();
this.scannerReadPoints = new ConcurrentHashMap<>();
@@ -1271,14 +1272,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
return writeRequestsCount.sum();
}
- /**
- * Update the write request count for this region
- * @param i increment
- */
- public void updateWriteRequestsCount(long i) {
- writeRequestsCount.add(i);
- }
-
@Override
public long getMemStoreSize() {
return memstoreDataSize.get();
@@ -2218,7 +2211,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
return flushcache(force, false, FlushLifeCycleTracker.DUMMY);
}
- public static interface FlushResult {
+ public interface FlushResult {
enum Result {
FLUSHED_NO_COMPACTION_NEEDED,
FLUSHED_COMPACTION_NEEDED,
@@ -3025,105 +3018,355 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
- * Struct-like class that tracks the progress of a batch operation, accumulating status codes
- * and tracking the index at which processing is proceeding. These batch operations may get
- * split into mini-batches for processing.
+ * Class that tracks the progress of batch operations, accumulating status codes and tracking
+ * the index at which processing is proceeding. These batch operations may get split into
+ * mini-batches for processing.
*/
private abstract static class BatchOperation<T> {
- T[] operations;
- int nextIndexToProcess = 0;
- OperationStatus[] retCodeDetails;
- WALEdit[] walEditsFromCoprocessors;
+ protected final T[] operations;
+ protected final OperationStatus[] retCodeDetails;
+ protected final WALEdit[] walEditsFromCoprocessors;
// reference family cell maps directly so coprocessors can mutate them if desired
- Map<byte[], List<Cell>>[] familyCellMaps;
- ObservedExceptionsInBatch observedExceptions;
- Durability durability; //Durability of the batch (highest durability of all operations)
+ protected final Map<byte[], List<Cell>>[] familyCellMaps;
+
+ protected final HRegion region;
+ protected int nextIndexToProcess = 0;
+ protected final ObservedExceptionsInBatch observedExceptions;
+ //Durability of the batch (highest durability of all operations)
+ protected Durability durability;
- public BatchOperation(T[] operations) {
+ public BatchOperation(final HRegion region, T[] operations) {
this.operations = operations;
this.retCodeDetails = new OperationStatus[operations.length];
- this.walEditsFromCoprocessors = new WALEdit[operations.length];
Arrays.fill(this.retCodeDetails, OperationStatus.NOT_RUN);
+ this.walEditsFromCoprocessors = new WALEdit[operations.length];
familyCellMaps = new Map[operations.length];
+
+ this.region = region;
observedExceptions = new ObservedExceptionsInBatch();
durability = Durability.USE_DEFAULT;
}
+ /**
+ * Visitor interface for batch operations
+ */
+ @FunctionalInterface
+ public interface Visitor {
+ /**
+ * @param index operation index
+ * @return If true continue visiting remaining entries, break otherwise
+ */
+ boolean visit(int index) throws IOException;
+ }
+
+ /**
+ * Helper method for visiting pending or all batch operations
+ */
+ public void visitBatchOperations(boolean pendingOnly, int lastIndexExclusive, Visitor visitor)
+ throws IOException {
+ assert lastIndexExclusive <= this.size();
+ for (int i = nextIndexToProcess; i < lastIndexExclusive; i++) {
+ if (!pendingOnly || isOperationPending(i)) {
+ if (!visitor.visit(i)) {
+ break;
+ }
+ }
+ }
+ }
+
public abstract Mutation getMutation(int index);
public abstract long getNonceGroup(int index);
public abstract long getNonce(int index);
- /** This method is potentially expensive and should only be used for non-replay CP path. */
+ /** This method is potentially expensive and useful mostly for the non-replay CP path. */
public abstract Mutation[] getMutationsForCoprocs();
public abstract boolean isInReplay();
- public abstract long getReplaySequenceId();
+ public abstract long getOrigLogSeqNum();
+ public abstract void startRegionOperation() throws IOException;
+ public abstract void closeRegionOperation() throws IOException;
+
+ /**
+ * Validates each mutation and prepares a batch for write. If necessary (non-replay case), runs
+ * CP prePut()/ preDelete() hooks for all mutations in a batch. This is intended to operate on
+ * entire batch and will be called from outside of class to check and prepare batch. This can
+ * be implemented by calling helper method {@link #checkAndPrepareMutation(int, long)} in a
+ * 'for' loop over mutations.
+ */
+ public abstract void checkAndPrepare() throws IOException;
+
+ /**
+ * Implement any Put request specific check and prepare logic here. Please refer to
+ * {@link #checkAndPrepareMutation(Mutation, long)} for how its used.
+ */
+ protected abstract void checkAndPreparePut(final Put p) throws IOException;
+
+ /**
+ * If necessary, calls preBatchMutate() CP hook for a mini-batch and updates metrics, cell
+ * count, tags and timestamp for all cells of all operations in a mini-batch.
+ */
+ public abstract void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation>
+ miniBatchOp, long timestamp, final List<RowLock> acquiredRowLocks) throws IOException;
+
+ /**
+ * Write mini-batch operations to MemStore
+ */
+ public abstract WriteEntry writeMiniBatchOperationsToMemStore(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp, final WriteEntry writeEntry)
+ throws IOException;
+
+ protected void writeMiniBatchOperationsToMemStore(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp, final long writeNumber)
+ throws IOException {
+ MemStoreSizing memStoreAccounting = new MemStoreSizing();
+ visitBatchOperations(true, miniBatchOp.getLastIndexExclusive(), (int index) -> {
+ // We need to update the sequence id for following reasons.
+ // 1) If the op is in replay mode, FSWALEntry#stampRegionSequenceId won't stamp sequence id.
+ // 2) If no WAL, FSWALEntry won't be used
+ // we use durability of the original mutation for the mutation passed by CP.
+ if (isInReplay() || getMutation(index).getDurability() == Durability.SKIP_WAL) {
+ region.updateSequenceId(familyCellMaps[index].values(), writeNumber);
+ }
+ applyFamilyMapToMemStore(familyCellMaps[index], memStoreAccounting);
+ return true;
+ });
+ // update memStore size
+ region.addAndGetMemStoreSize(memStoreAccounting);
+ }
public boolean isDone() {
return nextIndexToProcess == operations.length;
}
+ public int size() {
+ return operations.length;
+ }
+
+ public boolean isOperationPending(int index) {
+ return retCodeDetails[index].getOperationStatusCode() == OperationStatusCode.NOT_RUN;
+ }
+
+ public List<UUID> getClusterIds() {
+ assert size() != 0;
+ return getMutation(0).getClusterIds();
+ }
+
/**
- * Validates each mutation and prepares a batch for write.
+ * Helper method that checks and prepares only one mutation. This can be used to implement
+ * {@link #checkAndPrepare()} for entire Batch.
* NOTE: As CP prePut()/ preDelete() hooks may modify mutations, this method should be called
- * after prePut()/ preDelete() CP hooks are run for all mutations in a batch.
+ * after prePut()/ preDelete() CP hooks are run for the mutation
*/
- public void checkAndPrepare(final HRegion region) throws IOException {
- long now = EnvironmentEdgeManager.currentTime();
- for (int i = 0 ; i < operations.length; i++) {
- // Skip anything that "ran" already
- if (retCodeDetails[i].getOperationStatusCode() == OperationStatusCode.NOT_RUN) {
- Mutation mutation = getMutation(i);
+ protected void checkAndPrepareMutation(Mutation mutation, final long timestamp)
+ throws IOException {
+ region.checkRow(mutation.getRow(), "batchMutate");
+ if (mutation instanceof Put) {
+ // Check the families in the put. If bad, skip this one.
+ checkAndPreparePut((Put) mutation);
+ region.checkTimestamps(mutation.getFamilyCellMap(), timestamp);
+ } else {
+ region.prepareDelete((Delete) mutation);
+ }
+ }
- try {
- region.checkAndPrepareMutation(mutation, isInReplay(), now);
-
- // store the family map reference to allow for mutations
- familyCellMaps[i] = mutation.getFamilyCellMap();
- // store durability for the batch (highest durability of all operations in the batch)
- Durability tmpDur = region.getEffectiveDurability(mutation.getDurability());
- if (tmpDur.ordinal() > durability.ordinal()) {
- durability = tmpDur;
- }
- } catch (NoSuchColumnFamilyException nscf) {
- final String msg = "No such column family in batch mutation. ";
- if (observedExceptions.hasSeenNoSuchFamily()) {
- LOG.warn(msg + nscf.getMessage());
- } else {
- LOG.warn(msg, nscf);
- observedExceptions.sawNoSuchFamily();
- }
- retCodeDetails[i] = new OperationStatus(
- OperationStatusCode.BAD_FAMILY, nscf.getMessage());
- } catch (FailedSanityCheckException fsce) {
- final String msg = "Batch Mutation did not pass sanity check. ";
- if (observedExceptions.hasSeenFailedSanityCheck()) {
- LOG.warn(msg + fsce.getMessage());
- } else {
- LOG.warn(msg, fsce);
- observedExceptions.sawFailedSanityCheck();
- }
- retCodeDetails[i] = new OperationStatus(
- OperationStatusCode.SANITY_CHECK_FAILURE, fsce.getMessage());
- } catch (WrongRegionException we) {
- final String msg = "Batch mutation had a row that does not belong to this region. ";
- if (observedExceptions.hasSeenWrongRegion()) {
- LOG.warn(msg + we.getMessage());
- } else {
- LOG.warn(msg, we);
- observedExceptions.sawWrongRegion();
+ protected void checkAndPrepareMutation(int index, long timestamp) throws IOException {
+ Mutation mutation = getMutation(index);
+ try {
+ this.checkAndPrepareMutation(mutation, timestamp);
+
+ // store the family map reference to allow for mutations
+ familyCellMaps[index] = mutation.getFamilyCellMap();
+ // store durability for the batch (highest durability of all operations in the batch)
+ Durability tmpDur = region.getEffectiveDurability(mutation.getDurability());
+ if (tmpDur.ordinal() > durability.ordinal()) {
+ durability = tmpDur;
+ }
+ } catch (NoSuchColumnFamilyException nscf) {
+ final String msg = "No such column family in batch mutation. ";
+ if (observedExceptions.hasSeenNoSuchFamily()) {
+ LOG.warn(msg + nscf.getMessage());
+ } else {
+ LOG.warn(msg, nscf);
+ observedExceptions.sawNoSuchFamily();
+ }
+ retCodeDetails[index] = new OperationStatus(
+ OperationStatusCode.BAD_FAMILY, nscf.getMessage());
+ } catch (FailedSanityCheckException fsce) {
+ final String msg = "Batch Mutation did not pass sanity check. ";
+ if (observedExceptions.hasSeenFailedSanityCheck()) {
+ LOG.warn(msg + fsce.getMessage());
+ } else {
+ LOG.warn(msg, fsce);
+ observedExceptions.sawFailedSanityCheck();
+ }
+ retCodeDetails[index] = new OperationStatus(
+ OperationStatusCode.SANITY_CHECK_FAILURE, fsce.getMessage());
+ } catch (WrongRegionException we) {
+ final String msg = "Batch mutation had a row that does not belong to this region. ";
+ if (observedExceptions.hasSeenWrongRegion()) {
+ LOG.warn(msg + we.getMessage());
+ } else {
+ LOG.warn(msg, we);
+ observedExceptions.sawWrongRegion();
+ }
+ retCodeDetails[index] = new OperationStatus(
+ OperationStatusCode.SANITY_CHECK_FAILURE, we.getMessage());
+ }
+ }
+
+ /**
+ * Creates a mini-batch of all operations [nextIndexToProcess, lastIndexExclusive) for which
+ * a row lock can be acquired. All mutations with locked rows are considered to be
+ * in-progress operations, hence the name {@link MiniBatchOperationInProgress}. A mini-batch
+ * is a window over {@link BatchOperation} and contains contiguous pending operations.
+ *
+ * @param acquiredRowLocks keeps track of rowLocks acquired.
+ */
+ public MiniBatchOperationInProgress<Mutation> lockRowsAndBuildMiniBatch(
+ List<RowLock> acquiredRowLocks) throws IOException {
+ int readyToWriteCount = 0;
+ int lastIndexExclusive = 0;
+ for (; lastIndexExclusive < size(); lastIndexExclusive++) {
+ if (!isOperationPending(lastIndexExclusive)) {
+ continue;
+ }
+ Mutation mutation = getMutation(lastIndexExclusive);
+ // If we haven't got any rows in our batch, we should block to get the next one.
+ RowLock rowLock = null;
+ try {
+ rowLock = region.getRowLockInternal(mutation.getRow(), true);
+ } catch (TimeoutIOException e) {
+ // We will retry on other exceptions, but we should stop if we time out.
+ throw e;
+ } catch (IOException ioe) {
+ LOG.warn("Failed getting lock, row=" + Bytes.toStringBinary(mutation.getRow()), ioe);
+ }
+ if (rowLock == null) {
+ // We failed to grab another lock
+ break; // Stop acquiring more rows for this batch
+ } else {
+ acquiredRowLocks.add(rowLock);
+ }
+ readyToWriteCount++;
+ }
+ return createMiniBatch(lastIndexExclusive, readyToWriteCount);
+ }
+
+ protected MiniBatchOperationInProgress<Mutation> createMiniBatch(final int lastIndexExclusive,
+ final int readyToWriteCount) {
+ return new MiniBatchOperationInProgress<>(getMutationsForCoprocs(), retCodeDetails,
+ walEditsFromCoprocessors, nextIndexToProcess, lastIndexExclusive, readyToWriteCount);
+ }
+
+ /**
+ * Builds separate WALEdit per nonce by applying input mutations. If WALEdits from CP are
+ * present, they are merged to result WALEdit.
+ */
+ public List<Pair<NonceKey, WALEdit>> buildWALEdits(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+ List<Pair<NonceKey, WALEdit>> walEdits = new ArrayList<>();
+
+ visitBatchOperations(true, nextIndexToProcess + miniBatchOp.size(), new Visitor() {
+ private Pair<NonceKey, WALEdit> curWALEditForNonce;
+ @Override
+ public boolean visit(int index) throws IOException {
+ Mutation m = getMutation(index);
+ // we use durability of the original mutation for the mutation passed by CP.
+ if (region.getEffectiveDurability(m.getDurability()) == Durability.SKIP_WAL) {
+ region.recordMutationWithoutWal(m.getFamilyCellMap());
+ return true;
+ }
+
+ // the batch may contain multiple nonce keys (replay case). If so, write WALEdit for each.
+ // Given how nonce keys are originally written, these should be contiguous.
+ // They don't have to be, it will still work, just write more WALEdits than needed.
+ long nonceGroup = getNonceGroup(index);
+ long nonce = getNonce(index);
+ if (curWALEditForNonce == null ||
+ curWALEditForNonce.getFirst().getNonceGroup() != nonceGroup ||
+ curWALEditForNonce.getFirst().getNonce() != nonce) {
+ curWALEditForNonce = new Pair<>(new NonceKey(nonceGroup, nonce),
+ new WALEdit(miniBatchOp.getCellCount(), isInReplay()));
+ walEdits.add(curWALEditForNonce);
+ }
+ WALEdit walEdit = curWALEditForNonce.getSecond();
+
+ // Add WAL edits by CP
+ WALEdit fromCP = walEditsFromCoprocessors[index];
+ if (fromCP != null) {
+ for (Cell cell : fromCP.getCells()) {
+ walEdit.add(cell);
}
- retCodeDetails[i] = new OperationStatus(
- OperationStatusCode.SANITY_CHECK_FAILURE, we.getMessage());
}
+ addFamilyMapToWALEdit(familyCellMaps[index], walEdit);
+
+ return true;
+ }
+ });
+ return walEdits;
+ }
+
+ /**
+ * This method completes mini-batch operations by calling postBatchMutate() CP hook (if
+ * required) and completing mvcc.
+ */
+ public void completeMiniBatchOperations(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp, final WriteEntry writeEntry)
+ throws IOException {
+ if (writeEntry != null) {
+ region.mvcc.completeAndWait(writeEntry);
+ }
+ }
+
+ public void doPostOpCleanupForMiniBatch(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp, final WALEdit walEdit,
+ boolean success) throws IOException {}
+
+ /**
+ * Atomically apply the given map of family->edits to the memstore.
+ * This handles the consistency control on its own, but the caller
+ * should already have locked updatesLock.readLock(). This also does
+ * <b>not</b> check the families for validity.
+ *
+ * @param familyMap Map of Cells by family
+ */
+ protected void applyFamilyMapToMemStore(Map<byte[], List<Cell>> familyMap,
+ MemStoreSizing memstoreAccounting) throws IOException {
+ for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
+ byte[] family = e.getKey();
+ List<Cell> cells = e.getValue();
+ assert cells instanceof RandomAccess;
+ region.applyToMemStore(region.getStore(family), cells, false, memstoreAccounting);
+ }
+ }
+
+ /**
+ * Append the given map of family->edits to a WALEdit data structure.
+ * This does not write to the WAL itself.
+ * @param familyMap map of family->edits
+ * @param walEdit the destination entry to append into
+ */
+ private void addFamilyMapToWALEdit(Map<byte[], List<Cell>> familyMap,
+ WALEdit walEdit) {
+ for (List<Cell> edits : familyMap.values()) {
+ assert edits instanceof RandomAccess;
+ int listSize = edits.size();
+ for (int i=0; i < listSize; i++) {
+ Cell cell = edits.get(i);
+ walEdit.add(cell);
}
}
}
}
+ /**
+ * Batch of mutation operations. Base class is shared with {@link ReplayBatchOperation} as most
+ * of the logic is same.
+ */
private static class MutationBatchOperation extends BatchOperation<Mutation> {
private long nonceGroup;
private long nonce;
- public MutationBatchOperation(Mutation[] operations, long nonceGroup, long nonce) {
- super(operations);
+ public MutationBatchOperation(final HRegion region, Mutation[] operations, long nonceGroup,
+ long nonce) {
+ super(region, operations);
this.nonceGroup = nonceGroup;
this.nonce = nonce;
}
@@ -3154,16 +3397,279 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
@Override
- public long getReplaySequenceId() {
- return 0;
+ public long getOrigLogSeqNum() {
+ return WALKey.NO_SEQUENCE_ID;
+ }
+
+ @Override
+ public void startRegionOperation() throws IOException {
+ region.startRegionOperation(Operation.BATCH_MUTATE);
+ }
+
+ @Override
+ public void closeRegionOperation() throws IOException {
+ region.closeRegionOperation(Operation.BATCH_MUTATE);
+ }
+
+ @Override
+ public void checkAndPreparePut(Put p) throws IOException {
+ region.checkFamilies(p.getFamilyCellMap().keySet());
+ }
+
+ @Override
+ public void checkAndPrepare() throws IOException {
+ final int[] metrics = {0, 0}; // index 0: puts, index 1: deletes
+ visitBatchOperations(true, this.size(), new Visitor() {
+ private long now = EnvironmentEdgeManager.currentTime();
+ private WALEdit walEdit;
+ @Override
+ public boolean visit(int index) throws IOException {
+ // Run coprocessor pre hook outside of locks to avoid deadlock
+ if (region.coprocessorHost != null) {
+ if (walEdit == null) {
+ walEdit = new WALEdit();
+ }
+ callPreMutateCPHook(index, walEdit, metrics);
+ if (!walEdit.isEmpty()) {
+ walEditsFromCoprocessors[index] = walEdit;
+ walEdit = null;
+ }
+ }
+ if (isOperationPending(index)) {
+ // TODO: Currently validation is done with current time before acquiring locks and
+ // updates are done with different timestamps after acquiring locks. This behavior is
+ // inherited from the code prior to this change. Can this be changed?
+ checkAndPrepareMutation(index, now);
+ }
+ return true;
+ }
+ });
+
+ // FIXME: we may update metrics twice! here for all operations bypassed by CP and later in
+ // normal processing.
+ // Update metrics in same way as it is done when we go the normal processing route (we now
+ // update general metrics though a Coprocessor did the work).
+ if (region.metricsRegion != null) {
+ if (metrics[0] > 0) {
+ // There were some Puts in the batch.
+ region.metricsRegion.updatePut();
+ }
+ if (metrics[1] > 0) {
+ // There were some Deletes in the batch.
+ region.metricsRegion.updateDelete();
+ }
+ }
+ }
+
+ @Override
+ public void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
+ long timestamp, final List<RowLock> acquiredRowLocks) throws IOException {
+ byte[] byteTS = Bytes.toBytes(timestamp);
+ visitBatchOperations(true, miniBatchOp.getLastIndexExclusive(), (int index) -> {
+ Mutation mutation = getMutation(index);
+ if (mutation instanceof Put) {
+ region.updateCellTimestamps(familyCellMaps[index].values(), byteTS);
+ miniBatchOp.incrementNumOfPuts();
+ } else {
+ region.prepareDeleteTimestamps(mutation, familyCellMaps[index], byteTS);
+ miniBatchOp.incrementNumOfDeletes();
+ }
+ region.rewriteCellTags(familyCellMaps[index], mutation);
+
+ // update cell count
+ if (region.getEffectiveDurability(mutation.getDurability()) != Durability.SKIP_WAL) {
+ for (List<Cell> cells : mutation.getFamilyCellMap().values()) {
+ miniBatchOp.addCellCount(cells.size());
+ }
+ }
+
+ WALEdit fromCP = walEditsFromCoprocessors[index];
+ if (fromCP != null) {
+ miniBatchOp.addCellCount(fromCP.size());
+ }
+ return true;
+ });
+
+ if (region.coprocessorHost != null) {
+ // calling the pre CP hook for batch mutation
+ region.coprocessorHost.preBatchMutate(miniBatchOp);
+ checkAndMergeCPMutations(miniBatchOp, acquiredRowLocks, timestamp);
+ }
+ }
+
+ @Override
+ public List<Pair<NonceKey, WALEdit>> buildWALEdits(final MiniBatchOperationInProgress<Mutation>
+ miniBatchOp) throws IOException {
+ List<Pair<NonceKey, WALEdit>> walEdits = super.buildWALEdits(miniBatchOp);
+ // for MutationBatchOperation, more than one nonce is not allowed
+ if (walEdits.size() > 1) {
+ throw new IOException("Found multiple nonce keys per batch!");
+ }
+ return walEdits;
+ }
+
+ @Override
+ public WriteEntry writeMiniBatchOperationsToMemStore(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp, @Nullable WriteEntry writeEntry)
+ throws IOException {
+ if (writeEntry == null) {
+ writeEntry = region.mvcc.begin();
+ }
+ super.writeMiniBatchOperationsToMemStore(miniBatchOp, writeEntry.getWriteNumber());
+ return writeEntry;
+ }
+
+ @Override
+ public void completeMiniBatchOperations(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp, final WriteEntry writeEntry)
+ throws IOException {
+ // TODO: can it be done after completing mvcc?
+ // calling the post CP hook for batch mutation
+ if (region.coprocessorHost != null) {
+ region.coprocessorHost.postBatchMutate(miniBatchOp);
+ }
+ super.completeMiniBatchOperations(miniBatchOp, writeEntry);
+ }
+
+ @Override
+ public void doPostOpCleanupForMiniBatch(MiniBatchOperationInProgress<Mutation> miniBatchOp,
+ final WALEdit walEdit, boolean success) throws IOException {
+ if (miniBatchOp != null) {
+ // synced so that the coprocessor contract is adhered to.
+ if (region.coprocessorHost != null) {
+ visitBatchOperations(false, miniBatchOp.getLastIndexExclusive(), (int i) -> {
+ // only for successful puts
+ if (retCodeDetails[i].getOperationStatusCode() == OperationStatusCode.SUCCESS) {
+ Mutation m = getMutation(i);
+ if (m instanceof Put) {
+ region.coprocessorHost.postPut((Put) m, walEdit, m.getDurability());
+ } else {
+ region.coprocessorHost.postDelete((Delete) m, walEdit, m.getDurability());
+ }
+ }
+ return true;
+ });
+ }
+
+ // See if the column families were consistent through the whole thing.
+ // If they were, keep them; if not, pass null, which will be treated as unknown.
+ // Total time taken might involve both Puts and Deletes;
+ // split the time between puts and deletes based on their total counts.
+ if (region.metricsRegion != null) {
+ if (miniBatchOp.getNumOfPuts() > 0) {
+ // There were some Puts in the batch.
+ region.metricsRegion.updatePut();
+ }
+ if (miniBatchOp.getNumOfDeletes() > 0) {
+ // There were some Deletes in the batch.
+ region.metricsRegion.updateDelete();
+ }
+ }
+ }
+
+ if (region.coprocessorHost != null) {
+ // call the coprocessor hook to do any finalization steps after the put is done
+ region.coprocessorHost.postBatchMutateIndispensably(
+ miniBatchOp != null ? miniBatchOp : createMiniBatch(size(), 0), success);
+ }
+ }
+
+ /**
+ * Runs the prePut/preDelete coprocessor hooks for the input mutation of a batch.
+ * @param metrics Array of 2 ints: index 0 counts the Puts and index 1 the Deletes that the
+ * pre-hooks handled (marked successful and skipped in doMiniBatchMutation)
+ */
+ private void callPreMutateCPHook(int index, final WALEdit walEdit, final int[] metrics)
+ throws IOException {
+ Mutation m = getMutation(index);
+ if (m instanceof Put) {
+ if (region.coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) {
+ // pre hook says skip this Put
+ // mark as success and skip in doMiniBatchMutation
+ metrics[0]++;
+ retCodeDetails[index] = OperationStatus.SUCCESS;
+ }
+ } else if (m instanceof Delete) {
+ Delete curDel = (Delete) m;
+ if (curDel.getFamilyCellMap().isEmpty()) {
+ // handle deleting a row case
+ // TODO: prepareDelete() has been called twice, before and after preDelete() CP hook.
+ // Can this be avoided?
+ region.prepareDelete(curDel);
+ }
+ if (region.coprocessorHost.preDelete(curDel, walEdit, m.getDurability())) {
+ // pre hook says skip this Delete
+ // mark as success and skip in doMiniBatchMutation
+ metrics[1]++;
+ retCodeDetails[index] = OperationStatus.SUCCESS;
+ }
+ } else {
+ // If a mutation other than Put/Delete (e.g. Append) is passed along with the Puts and
+ // Deletes in batchMutate, mark its return code as failure so that it will not be
+ // considered in doMiniBatchMutation.
+ retCodeDetails[index] = new OperationStatus(OperationStatusCode.FAILURE,
+ "Put/Delete mutations only supported in batchMutate() now");
+ }
+ }
+
+ private void checkAndMergeCPMutations(final MiniBatchOperationInProgress<Mutation> miniBatchOp,
+ final List<RowLock> acquiredRowLocks, final long timestamp) throws IOException {
+ visitBatchOperations(true, nextIndexToProcess + miniBatchOp.size(), (int i) -> {
+ // we pass (i - nextIndexToProcess) below since the call expects a relative index
+ Mutation[] cpMutations = miniBatchOp.getOperationsFromCoprocessors(i - nextIndexToProcess);
+ if (cpMutations == null) {
+ return true;
+ }
+ // Else Coprocessor added more Mutations corresponding to the Mutation at this index.
+ Mutation mutation = getMutation(i);
+ for (Mutation cpMutation : cpMutations) {
+ this.checkAndPrepareMutation(cpMutation, timestamp);
+
+ // Acquire row locks. If not, the whole batch will fail.
+ acquiredRowLocks.add(region.getRowLockInternal(cpMutation.getRow(), true));
+
+ // Returned mutations from coprocessor correspond to the Mutation at index i. We can
+ // directly add the cells from those mutations to the familyMaps of this mutation.
+ Map<byte[], List<Cell>> cpFamilyMap = cpMutation.getFamilyCellMap();
+ // will get added to the memStore later
+ mergeFamilyMaps(familyCellMaps[i], cpFamilyMap);
+
+ // The durability of the returned mutation is replaced by that of the corresponding
+ // original mutation. If the original mutation specifies SKIP_WAL, we shouldn't count
+ // the cells of the returned mutation.
+ if (region.getEffectiveDurability(mutation.getDurability()) != Durability.SKIP_WAL) {
+ for (List<Cell> cells : cpFamilyMap.values()) {
+ miniBatchOp.addCellCount(cells.size());
+ }
+ }
+ }
+ return true;
+ });
+ }
+
+ private void mergeFamilyMaps(Map<byte[], List<Cell>> familyMap,
+ Map<byte[], List<Cell>> toBeMerged) {
+ for (Map.Entry<byte[], List<Cell>> entry : toBeMerged.entrySet()) {
+ List<Cell> cells = familyMap.get(entry.getKey());
+ if (cells == null) {
+ familyMap.put(entry.getKey(), entry.getValue());
+ } else {
+ cells.addAll(entry.getValue());
+ }
+ }
}
}
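The checkAndMergeCPMutations() path above exists so that a coprocessor can attach extra mutations to a batch from its preBatchMutate() hook; the region then row-locks, prepares, and counts those additions alongside the originals. A minimal sketch of such an observer follows; the AuditObserver class name and the "audit" family are hypothetical, and the exact coprocessor wiring varied across the 2.0 alphas:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical observer: attach an audit Put to the first operation of every batch.
// HRegion folds it into the batch via checkAndMergeCPMutations().
public class AuditObserver implements RegionObserver {
  @Override
  public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> ctx,
      MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Put audit = new Put(miniBatchOp.getOperation(0).getRow());
    audit.addColumn(Bytes.toBytes("audit"), Bytes.toBytes("ts"),
        Bytes.toBytes(System.currentTimeMillis()));
    // Mutations registered here show up later via getOperationsFromCoprocessors(0).
    miniBatchOp.addOperationsFromCP(0, new Mutation[] { audit });
  }
}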
+ /**
+ * Batch of mutations for replay. The base class is shared with
+ * {@link MutationBatchOperation} as most of the logic is the same.
+ */
private static class ReplayBatchOperation extends BatchOperation<MutationReplay> {
- private long replaySeqId = 0;
- public ReplayBatchOperation(MutationReplay[] operations, long seqId) {
- super(operations);
- this.replaySeqId = seqId;
+ private long origLogSeqNum = 0;
+ public ReplayBatchOperation(final HRegion region, MutationReplay[] operations,
+ long origLogSeqNum) {
+ super(region, operations);
+ this.origLogSeqNum = origLogSeqNum;
}
@Override
@@ -3183,8 +3689,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
@Override
public Mutation[] getMutationsForCoprocs() {
- assert false;
- throw new RuntimeException("Should not be called for replay batch");
+ return null;
}
@Override
@@ -3193,8 +3698,80 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
@Override
- public long getReplaySequenceId() {
- return this.replaySeqId;
+ public long getOrigLogSeqNum() {
+ return this.origLogSeqNum;
+ }
+
+ @Override
+ public void startRegionOperation() throws IOException {
+ region.startRegionOperation(Operation.REPLAY_BATCH_MUTATE);
+ }
+
+ @Override
+ public void closeRegionOperation() throws IOException {
+ region.closeRegionOperation(Operation.REPLAY_BATCH_MUTATE);
+ }
+
+ /**
+ * During replay there may be column families that were removed between the region server
+ * failure and the replay.
+ */
+ @Override
+ protected void checkAndPreparePut(Put p) throws IOException {
+ Map<byte[], List<Cell>> familyCellMap = p.getFamilyCellMap();
+ List<byte[]> nonExistentList = null;
+ for (byte[] family : familyCellMap.keySet()) {
+ if (!region.htableDescriptor.hasColumnFamily(family)) {
+ if (nonExistentList == null) {
+ nonExistentList = new ArrayList<>();
+ }
+ nonExistentList.add(family);
+ }
+ }
+ if (nonExistentList != null) {
+ for (byte[] family : nonExistentList) {
+ // Perhaps schema was changed between crash and replay
+ LOG.info("No family for " + Bytes.toString(family) + " omit from reply.");
+ familyCellMap.remove(family);
+ }
+ }
+ }
+
+ @Override
+ public void checkAndPrepare() throws IOException {
+ long now = EnvironmentEdgeManager.currentTime();
+ visitBatchOperations(true, this.size(), (int index) -> {
+ checkAndPrepareMutation(index, now);
+ return true;
+ });
+ }
+
+ @Override
+ public void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
+ long timestamp, final List<RowLock> acquiredRowLocks) throws IOException {
+ visitBatchOperations(true, miniBatchOp.getLastIndexExclusive(), (int index) -> {
+ // update cell count
+ for (List<Cell> cells : getMutation(index).getFamilyCellMap().values()) {
+ miniBatchOp.addCellCount(cells.size());
+ }
+ return true;
+ });
+ }
+
+ @Override
+ public WriteEntry writeMiniBatchOperationsToMemStore(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp, final WriteEntry writeEntry)
+ throws IOException {
+ super.writeMiniBatchOperationsToMemStore(miniBatchOp, getOrigLogSeqNum());
+ return writeEntry;
+ }
+
+ @Override
+ public void completeMiniBatchOperations(
+ final MiniBatchOperationInProgress<Mutation> miniBatchOp, final WriteEntry writeEntry)
+ throws IOException {
+ super.completeMiniBatchOperations(miniBatchOp, writeEntry);
+ region.mvcc.advanceTo(getOrigLogSeqNum());
}
}
@@ -3204,7 +3781,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// * batchMutate with single mutation - put/delete, separate or from checkAndMutate.
// * coprocessor calls (see ex. BulkDeleteEndpoint).
// So nonces are not really ever used by HBase. They could be by coprocs, and checkAnd...
- return batchMutate(new MutationBatchOperation(mutations, nonceGroup, nonce));
+ return batchMutate(new MutationBatchOperation(this, mutations, nonceGroup, nonce));
}
public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException {
@@ -3232,14 +3809,24 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
return statuses;
}
- return batchMutate(new ReplayBatchOperation(mutations, replaySeqId));
+ return batchMutate(new ReplayBatchOperation(this, mutations, replaySeqId));
}
/**
* Perform a batch of mutations.
+ *
* It supports only Put and Delete mutations and will ignore other types passed. Operations in
* a batch are stored with the highest durability specified for any operation in the batch,
* except for {@link Durability#SKIP_WAL}.
+ *
+ * <p>This method is called from {@link #batchReplay(MutationReplay[], long)} with a
+ * {@link ReplayBatchOperation} instance and from {@link #batchMutate(Mutation[], long, long)}
+ * with a {@link MutationBatchOperation} instance. As the processing of a replay batch and a
+ * mutation batch is very similar, a lot of code is shared through generic methods in the
+ * base class {@link BatchOperation}. The logic for this method and
+ * {@link #doMiniBatchMutate(BatchOperation)} lives in base-class methods that the derived
+ * classes override to implement their specialized behavior.
+ *
* @param batchOp contains the list of mutations
* @return an array of OperationStatus which internally contains the
* OperationStatusCode and the exceptionMessage if any.
@@ -3247,8 +3834,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
*/
OperationStatus[] batchMutate(BatchOperation<?> batchOp) throws IOException {
boolean initialized = false;
- Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : Operation.BATCH_MUTATE;
- startRegionOperation(op);
+ batchOp.startRegionOperation();
try {
while (!batchOp.isDone()) {
if (!batchOp.isInReplay()) {
@@ -3257,12 +3843,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
checkResources();
if (!initialized) {
- this.writeRequestsCount.add(batchOp.operations.length);
- if (!batchOp.isInReplay()) {
- callPreMutateCPHooks(batchOp);
- }
- // validate and prepare batch for write, after CP pre-hooks
- batchOp.checkAndPrepare(this);
+ this.writeRequestsCount.add(batchOp.size());
+ // validate and prepare batch for write, for MutationBatchOperation it also calls CP
+ // prePut()/ preDelete() hooks
+ batchOp.checkAndPrepare();
initialized = true;
}
doMiniBatchMutate(batchOp);
@@ -3270,296 +3854,75 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
requestFlushIfNeeded(newSize);
}
} finally {
- closeRegionOperation(op);
+ batchOp.closeRegionOperation();
}
return batchOp.retCodeDetails;
}
/**
- * Runs prePut/ preDelete coprocessor hooks for each mutation in a batch.
- * @param batchOp
- */
- private void callPreMutateCPHooks(BatchOperation<?> batchOp) throws IOException {
- if (coprocessorHost == null) {
- return;
- }
- /* Run coprocessor pre hook outside of locks to avoid deadlock */
- WALEdit walEdit = new WALEdit();
- int noOfPuts = 0;
- int noOfDeletes = 0;
- for (int i = 0 ; i < batchOp.operations.length; i++) {
- Mutation m = batchOp.getMutation(i);
- if (m instanceof Put) {
- if (coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) {
- // pre hook says skip this Put
- // mark as success and skip in doMiniBatchMutation
- noOfPuts++;
- batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
- }
- } else if (m instanceof Delete) {
- Delete curDel = (Delete) m;
- if (curDel.getFamilyCellMap().isEmpty()) {
- // handle deleting a row case
- prepareDelete(curDel);
- }
- if (coprocessorHost.preDelete(curDel, walEdit, m.getDurability())) {
- // pre hook says skip this Delete
- // mark as success and skip in doMiniBatchMutation
- noOfDeletes++;
- batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
- }
- } else {
- // In case of passing Append mutations along with the Puts and Deletes in batchMutate
- // mark the operation return code as failure so that it will not be considered in
- // the doMiniBatchMutation
- batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.FAILURE,
- "Put/Delete mutations only supported in batchMutate() now");
- }
- if (!walEdit.isEmpty()) {
- batchOp.walEditsFromCoprocessors[i] = walEdit;
- walEdit = new WALEdit();
- }
- }
- // Update metrics in same way as it is done when we go the normal processing route (we now
- // update general metrics though a Coprocessor did the work).
- if (noOfPuts > 0) {
- // There were some Puts in the batch.
- if (this.metricsRegion != null) {
- this.metricsRegion.updatePut();
- }
- }
- if (noOfDeletes > 0) {
- // There were some Deletes in the batch.
- if (this.metricsRegion != null) {
- this.metricsRegion.updateDelete();
- }
- }
- }
-
- /**
* Called to do a piece of the batch that came in to {@link #batchMutate(Mutation[], long, long)}.
* In here we also handle replay of edits on region recovery.
*/
- // TODO: This needs a rewrite. Doesn't have to be this long. St.Ack 20160120
private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
- boolean replay = batchOp.isInReplay();
- long currentNonceGroup = HConstants.NO_NONCE;
- long currentNonce = HConstants.NO_NONCE;
- WALEdit walEdit = null;
- boolean locked = false;
- // We try to set up a batch in the range [firstIndex,lastIndexExclusive)
- int firstIndex = batchOp.nextIndexToProcess;
- int lastIndexExclusive = firstIndex;
boolean success = false;
- int noOfPuts = 0;
- int noOfDeletes = 0;
+ WALEdit walEdit = null;
WriteEntry writeEntry = null;
- int cellCount = 0;
+ boolean locked = false;
+ // We try to set up a batch in the range [batchOp.nextIndexToProcess, lastIndexExclusive)
+ MiniBatchOperationInProgress<Mutation> miniBatchOp = null;
/** Keep track of the locks we hold so we can release them in finally clause */
- List<RowLock> acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.operations.length);
- MemStoreSizing memStoreAccounting = new MemStoreSizing();
+ List<RowLock> acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.size());
try {
- // STEP 1. Try to acquire as many locks as we can, and ensure we acquire at least one.
- int numReadyToWrite = 0;
- for (; lastIndexExclusive < batchOp.operations.length; lastIndexExclusive++) {
- if (batchOp.retCodeDetails[lastIndexExclusive].getOperationStatusCode()
- != OperationStatusCode.NOT_RUN) {
- continue;
- }
- Mutation mutation = batchOp.getMutation(lastIndexExclusive);
- // If we haven't got any rows in our batch, we should block to get the next one.
- RowLock rowLock = null;
- try {
- rowLock = getRowLockInternal(mutation.getRow(), true);
- } catch (TimeoutIOException e) {
- // We will retry when other exceptions, but we should stop if we timeout .
- throw e;
- } catch (IOException ioe) {
- LOG.warn("Failed getting lock, row=" + Bytes.toStringBinary(mutation.getRow()), ioe);
- }
- if (rowLock == null) {
- // We failed to grab another lock
- break; // Stop acquiring more rows for this batch
- } else {
- acquiredRowLocks.add(rowLock);
- }
-
- numReadyToWrite++;
- if (replay || getEffectiveDurability(mutation.getDurability()) != Durability.SKIP_WAL) {
- for (List<Cell> cells : mutation.getFamilyCellMap().values()) {
- cellCount += cells.size();
- }
- }
- }
+ // STEP 1. Try to acquire as many locks as we can and build mini-batch of operations with
+ // locked rows
+ miniBatchOp = batchOp.lockRowsAndBuildMiniBatch(acquiredRowLocks);
// We've now grabbed as many mutations off the list as we can
- // Nothing to put/delete -- an exception in the above such as NoSuchColumnFamily?
- if (numReadyToWrite <= 0) {
+ // Ensure we acquire at least one.
+ if (miniBatchOp.getReadyToWriteCount() <= 0) {
+ // Nothing to put/delete -- an exception in the above such as NoSuchColumnFamily?
return;
}
- // STEP 2. Update any LATEST_TIMESTAMP timestamps
+ lock(this.updatesLock.readLock(), miniBatchOp.getReadyToWriteCount());
+ locked = true;
+
+ // STEP 2. Update any LATEST_TIMESTAMP timestamps in the mini-batch of operations in progress
// We should record the timestamp only after we have acquired the rowLock,
// otherwise, newer puts/deletes are not guaranteed to have a newer timestamp
long now = EnvironmentEdgeManager.currentTime();
- if (!replay) {
- byte[] byteNow = Bytes.toBytes(now);
- for (int i = firstIndex; i < lastIndexExclusive; i++) {
- // skip invalid
- if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
- // lastIndexExclusive was incremented above.
- continue;
- }
-
- Mutation mutation = batchOp.getMutation(i);
- if (mutation instanceof Put) {
- updateCellTimestamps(batchOp.familyCellMaps[i].values(), byteNow);
- noOfPuts++;
- } else {
- prepareDeleteTimestamps(mutation, batchOp.familyCellMaps[i], byteNow);
- noOfDeletes++;
- }
- rewriteCellTags(batchOp.familyCellMaps[i], mutation);
- WALEdit fromCP = batchOp.walEditsFromCoprocessors[i];
- if (fromCP != null) {
- cellCount += fromCP.size();
- }
- }
- }
- lock(this.updatesLock.readLock(), numReadyToWrite);
- locked = true;
-
- // calling the pre CP hook for batch mutation
- if (!replay && coprocessorHost != null) {
- MiniBatchOperationInProgress<Mutation> miniBatchOp =
- new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(),
- batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
- coprocessorHost.preBatchMutate(miniBatchOp);
- for (int i = firstIndex; i < lastIndexExclusive; i++) {
- if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
- // lastIndexExclusive was incremented above.
- continue;
- }
- // we pass (i - firstIndex) below since the call expects a relative index
- Mutation[] cpMutations = miniBatchOp.getOperationsFromCoprocessors(i - firstIndex);
- if (cpMutations == null) {
- continue;
- }
- Mutation mutation = batchOp.getMutation(i);
- boolean skipWal = getEffectiveDurability(mutation.getDurability()) == Durability.SKIP_WAL;
- // Else Coprocessor added more Mutations corresponding to the Mutation at this index.
- for (int j = 0; j < cpMutations.length; j++) {
- Mutation cpMutation = cpMutations[j];
- checkAndPrepareMutation(cpMutation, replay, now);
-
- // Acquire row locks. If not, the whole batch will fail.
- acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), true));
-
- // Returned mutations from coprocessor correspond to the Mutation at index i. We can
- // directly add the cells from those mutations to the familyMaps of this mutation.
- Map<byte[], List<Cell>> cpFamilyMap = cpMutation.getFamilyCellMap();
- // will get added to the memStore later
- mergeFamilyMaps(batchOp.familyCellMaps[i], cpFamilyMap);
-
- // The durability of returned mutation is replaced by the corresponding mutation.
- // If the corresponding mutation contains the SKIP_WAL, we shouldn't count the
- // cells of returned mutation.
- if (!skipWal) {
- for (List<Cell> cells : cpFamilyMap.values()) {
- cellCount += cells.size();
- }
- }
- }
- }
- }
+ batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks);
// STEP 3. Build WAL edits
- walEdit = new WALEdit(cellCount, replay);
- for (int i = firstIndex; i < lastIndexExclusive; i++) {
- // Skip puts that were determined to be invalid during preprocessing
- if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
- continue;
- }
+ List<Pair<NonceKey, WALEdit>> walEdits = batchOp.buildWALEdits(miniBatchOp);
- Mutation m = batchOp.getMutation(i);
- // we use durability of the original mutation for the mutation passed by CP.
- if (getEffectiveDurability(m.getDurability()) == Durability.SKIP_WAL) {
- recordMutationWithoutWal(m.getFamilyCellMap());
- continue;
- }
+ // STEP 4. Append the WALEdits to WAL and sync.
+ for (Iterator<Pair<NonceKey, WALEdit>> it = walEdits.iterator(); it.hasNext();) {
+ Pair<NonceKey, WALEdit> nonceKeyWALEditPair = it.next();
+ walEdit = nonceKeyWALEditPair.getSecond();
+ NonceKey nonceKey = nonceKeyWALEditPair.getFirst();
- long nonceGroup = batchOp.getNonceGroup(i);
- long nonce = batchOp.getNonce(i);
- // In replay, the batch may contain multiple nonces. If so, write WALEdit for each.
- // Given how nonces are originally written, these should be contiguous.
- // They don't have to be, it will still work, just write more WALEdits than needed.
- if (nonceGroup != currentNonceGroup || nonce != currentNonce) {
- // Write what we have so far for nonces out to WAL
- appendCurrentNonces(m, replay, walEdit, now, currentNonceGroup, currentNonce);
- walEdit = new WALEdit(cellCount, replay);
- currentNonceGroup = nonceGroup;
- currentNonce = nonce;
+ if (walEdit != null && !walEdit.isEmpty()) {
+ writeEntry = doWALAppend(walEdit, batchOp.durability, batchOp.getClusterIds(), now,
+ nonceKey.getNonceGroup(), nonceKey.getNonce(), batchOp.getOrigLogSeqNum());
}
- // Add WAL edits by CP
- WALEdit fromCP = batchOp.walEditsFromCoprocessors[i];
- if (fromCP != null) {
- for (Cell cell : fromCP.getCells()) {
- walEdit.add(cell);
- }
+ // Complete mvcc for all but the last writeEntry (replay batches may carry multiple nonces)
+ if (it.hasNext() && writeEntry != null) {
+ mvcc.complete(writeEntry);
+ writeEntry = null;
}
- addFamilyMapToWALEdit(batchOp.familyCellMaps[i], walEdit);
- }
-
- // STEP 4. Append the final edit to WAL and sync.
- Mutation mutation = batchOp.getMutation(firstIndex);
- writeEntry = doWALAppend(walEdit, batchOp.durability, mutation.getClusterIds(), now,
- currentNonceGroup, currentNonce,
- replay ? batchOp.getReplaySequenceId() : WALKey.NO_SEQUENCE_ID);
- if (!replay && writeEntry == null) {
- // If no writeEntry, then not in replay and skipping WAL or some such. Begin an MVCC
- // transaction to get sequence id.
- writeEntry = mvcc.begin();
}
// STEP 5. Write back to memStore
- for (int i = firstIndex; i < lastIndexExclusive; i++) {
- if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
- continue;
- }
- // We need to update the sequence id for following reasons.
- // 1) If the op is in replay mode, FSWALEntry#stampRegionSequenceId won't stamp sequence id.
- // 2) If no WAL, FSWALEntry won't be used
- // we use durability of the original mutation for the mutation passed by CP.
- boolean updateSeqId = replay || batchOp.getMutation(i).getDurability() == Durability.SKIP_WAL;
- if (updateSeqId) {
- this.updateSequenceId(batchOp.familyCellMaps[i].values(),
- replay? batchOp.getReplaySequenceId(): writeEntry.getWriteNumber());
- }
- applyFamilyMapToMemStore(batchOp.familyCellMaps[i], memStoreAccounting);
- }
-
- // update memstore size
- this.addAndGetMemStoreSize(memStoreAccounting);
-
- // calling the post CP hook for batch mutation
- if (!replay && coprocessorHost != null) {
- MiniBatchOperationInProgress<Mutation> miniBatchOp =
- new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(),
- batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
- coprocessorHost.postBatchMutate(miniBatchOp);
- }
-
- // STEP 6. Complete mvcc.
- if (writeEntry != null) {
- mvcc.completeAndWait(writeEntry);
- writeEntry = null;
- }
- if (replay) {
- this.mvcc.advanceTo(batchOp.getReplaySequenceId());
- }
+ // NOTE: writeEntry can be null here
+ writeEntry = batchOp.writeMiniBatchOperationsToMemStore(miniBatchOp, writeEntry);
+ // STEP 6. Complete MiniBatchOperations: if required, call the postBatchMutate() CP hook
+ // and complete mvcc for the last writeEntry
+ batchOp.completeMiniBatchOperations(miniBatchOp, writeEntry);
+ writeEntry = null;
success = true;
} finally {
// Call complete rather than completeAndWait because we probably had an error if walKey != null
@@ -3570,122 +3933,18 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
releaseRowLocks(acquiredRowLocks);
- for (int i = firstIndex; i < lastIndexExclusive; i++) {
- if (batchOp.retCodeDetails[i] == OperationStatus.NOT_RUN) {
- batchOp.retCodeDetails[i] = success? OperationStatus.SUCCESS : OperationStatus.FAILURE;
- }
- }
-
- // synced so that the coprocessor contract is adhered to.
- if (!replay && coprocessorHost != null) {
- for (int i = firstIndex; i < lastIndexExclusive; i++) {
- // only for successful puts
- if (batchOp.retCodeDetails[i].getOperationStatusCode()
- != OperationStatusCode.SUCCESS) {
- continue;
- }
- Mutation m = batchOp.getMutation(i);
- if (m instanceof Put) {
- coprocessorHost.postPut((Put) m, walEdit, m.getDurability());
- } else {
- coprocessorHost.postDelete((Delete) m, walEdit, m.getDurability());
- }
- }
- }
-
- // See if the column families were consistent through the whole thing.
- // if they were then keep them. If they were not then pass a null.
- // null will be treated as unknown.
- // Total time taken might be involving Puts and Deletes.
- // Split the time for puts and deletes based on the total number of Puts and Deletes.
-
- if (noOfPuts > 0) {
- // There were some Puts in the batch.
- if (this.metricsRegion != null) {
- this.metricsRegion.updatePut();
- }
- }
- if (noOfDeletes > 0) {
- // There were some Deletes in the batch.
- if (this.metricsRegion != null) {
- this.metricsRegion.updateDelete();
- }
- }
-
- if (coprocessorHost != null && !batchOp.isInReplay()) {
- // call the coprocessor hook to do any finalization steps
- // after the put is done
- MiniBatchOperationInProgress<Mutation> miniBatchOp =
- new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(),
- batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
- coprocessorHost.postBatchMutateIndispensably(miniBatchOp, success);
- }
-
- batchOp.nextIndexToProcess = lastIndexExclusive;
- }
- }
-
- private void mergeFamilyMaps(Map<byte[], List<Cell>> familyMap,
- Map<byte[], List<Cell>> toBeMerged) {
- for (Map.Entry<byte[], List<Cell>> entry : toBeMerged.entrySet()) {
- List<Cell> cells = familyMap.get(entry.getKey());
- if (cells == null) {
- familyMap.put(entry.getKey(), entry.getValue());
- } else {
- cells.addAll(entry.getValue());
- }
- }
- }
-
- private void appendCurrentNonces(final Mutation mutation, final boolean replay,
- final WALEdit walEdit, final long now, final long currentNonceGroup, final long currentNonce)
- throws IOException {
- if (walEdit.isEmpty()) return;
- if (!replay) throw new IOException("Multiple nonces per batch and not in replay");
- WALKey walKey = new WALKey(this.getRegionInfo().getEncodedNameAsBytes(),
- this.htableDescriptor.getTableName(), now, mutation.getClusterIds(),
- currentNonceGroup, currentNonce, mvcc, this.getReplicationScope());
- this.wal.append(this.getRegionInfo(), walKey, walEdit, true);
- // Complete the mvcc transaction started down in append else it will block others
- this.mvcc.complete(walKey.getWriteEntry());
- }
+ final int finalLastIndexExclusive =
+ miniBatchOp != null ? miniBatchOp.getLastIndexExclusive() : batchOp.size();
+ final boolean finalSuccess = success;
+ batchOp.visitBatchOperations(true, finalLastIndexExclusive, (int i) -> {
+ batchOp.retCodeDetails[i] =
+ finalSuccess ? OperationStatus.SUCCESS : OperationStatus.FAILURE;
+ return true;
+ });
- private void checkAndPrepareMutation(Mutation mutation, boolean replay, final long now)
- throws IOException {
- checkRow(mutation.getRow(), "doMiniBatchMutation");
- if (mutation instanceof Put) {
- // Check the families in the put. If bad, skip this one.
- if (replay) {
- removeNonExistentColumnFamilyForReplay(mutation.getFamilyCellMap());
- } else {
- checkFamilies(mutation.getFamilyCellMap().keySet());
- }
- checkTimestamps(mutation.getFamilyCellMap(), now);
- } else {
- prepareDelete((Delete)mutation);
- }
- }
+ batchOp.doPostOpCleanupForMiniBatch(miniBatchOp, walEdit, finalSuccess);
- /**
- * During replay, there could exist column families which are removed between region server
- * failure and replay
- */
- private void removeNonExistentColumnFamilyForReplay(final Map<byte[], List<Cell>> familyMap) {
- List<byte[]> nonExistentList = null;
- for (byte[] family : familyMap.keySet()) {
- if (!this.htableDescriptor.hasColumnFamily(family)) {
- if (nonExistentList == null) {
- nonExistentList = new ArrayList<>();
- }
- nonExistentList.add(family);
- }
- }
- if (nonExistentList != null) {
- for (byte[] family : nonExistentList) {
- // Perhaps schema was changed between crash and replay
- LOG.info("No family for " + Bytes.toString(family) + " omit from reply.");
- familyMap.remove(family);
- }
+ batchOp.nextIndexToProcess = finalLastIndexExclusive;
}
}
@@ -4003,25 +4262,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
doBatchMutate(p);
}
- /*
- * Atomically apply the given map of family->edits to the memstore.
- * This handles the consistency control on its own, but the caller
- * should already have locked updatesLock.readLock(). This also does
- * <b>not</b> check the families for validity.
- *
- * @param familyMap Map of Cells by family
- * @param memstoreSize
- */
- private void applyFamilyMapToMemStore(Map<byte[], List<Cell>> familyMap,
- MemStoreSizing memstoreAccounting) throws IOException {
- for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
- byte[] family = e.getKey();
- List<Cell> cells = e.getValue();
- assert cells instanceof RandomAccess;
- applyToMemStore(getStore(family), cells, false, memstoreAccounting);
- }
- }
-
/**
* @param delta If we are doing delta changes -- e.g. increment/append -- then this flag will be
* set; when set we will run operations that make sense in the increment/append scenario
@@ -4090,24 +4330,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
}
- /**
- * Append the given map of family->edits to a WALEdit data structure.
- * This does not write to the WAL itself.
- * @param familyMap map of family->edits
- * @param walEdit the destination entry to append into
- */
- private void addFamilyMapToWALEdit(Map<byte[], List<Cell>> familyMap,
- WALEdit walEdit) {
- for (List<Cell> edits : familyMap.values()) {
- assert edits instanceof RandomAccess;
- int listSize = edits.size();
- for (int i=0; i < listSize; i++) {
- Cell cell = edits.get(i);
- walEdit.add(cell);
- }
- }
- }
-
/*
* @param size
* @return True if size is over the flush threshold
@@ -5471,8 +5693,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
private void releaseRowLocks(List<RowLock> rowLocks) {
if (rowLocks != null) {
- for (int i = 0; i < rowLocks.size(); i++) {
- rowLocks.get(i).release();
+ for (RowLock rowLock : rowLocks) {
+ rowLock.release();
}
rowLocks.clear();
}
@@ -5626,7 +5848,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* bulkLoadHFile() to perform any necessary
* pre/post processing of a given bulkload call
*/
- public static interface BulkLoadListener {
+ public interface BulkLoadListener {
/**
* Called before an HFile is actually loaded
* @param family family being loaded to
@@ -6081,7 +6303,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// to handle scan or get operation.
moreValues = nextInternal(outResults, scannerContext);
} else {
- List<Cell> tmpList = new ArrayList<Cell>();
+ List<Cell> tmpList = new ArrayList<>();
moreValues = nextInternal(tmpList, scannerContext);
outResults.addAll(tmpList);
}
@@ -6861,46 +7083,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
- * Create a daughter region from given a temp directory with the region data.
- * @param hri Spec. for daughter region to open.
- * @throws IOException
- */
- public HRegion createDaughterRegionFromSplits(final RegionInfo hri) throws IOException {
- // Move the files from the temporary .splits to the final /table/region directory
- fs.commitDaughterRegion(hri);
-
- // Create the daughter HRegion instance
- HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), fs.getFileSystem(),
- this.getBaseConf(), hri, this.getTableDescriptor(), rsServices);
- r.readRequestsCount.add(this.getReadRequestsCount() / 2);
- r.filteredReadRequestsCount.add(this.getFilteredReadRequestsCount() / 2);
- r.writeRequestsCount.add(this.getWriteRequestsCount() / 2);
- return r;
- }
-
- /**
- * Create a merged region given a temp directory with the region data.
- * @param region_b another merging region
- * @return merged HRegion
- * @throws IOException
- */
- HRegion createMergedRegionFromMerges(final RegionInfo mergedRegionInfo,
- final HRegion region_b) throws IOException {
- HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(),
- fs.getFileSystem(), this.getBaseConf(), mergedRegionInfo,
- this.getTableDescriptor(), this.rsServices);
- r.readRequestsCount.add(this.getReadRequestsCount()
- + region_b.getReadRequestsCount());
- r.filteredReadRequestsCount.add(this.getFilteredReadRequestsCount()
- + region_b.getFilteredReadRequestsCount());
- r.writeRequestsCount.add(this.getWriteRequestsCount()
-
- + region_b.getWriteRequestsCount());
- this.fs.commitMergedRegion(mergedRegionInfo);
- return r;
- }
-
- /**
* Computes the Path of the HRegion
*
* @param tabledir qualified path for table
@@ -6960,7 +7142,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
}
- void prepareGet(final Get get) throws IOException, NoSuchColumnFamilyException {
+ void prepareGet(final Get get) throws IOException {
checkRow(get.getRow(), "Get");
// Verify families are all valid
if (get.hasFamilies()) {
@@ -7396,32 +7578,36 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* @return writeEntry associated with this append
*/
private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, List<UUID> clusterIds,
- long now, long nonceGroup, long nonce, long replaySeqId) throws IOException {
- Preconditions.checkArgument(!walEdit.isReplay() || replaySeqId != WALKey.NO_SEQUENCE_ID,
+ long now, long nonceGroup, long nonce, long origLogSeqNum) throws IOException {
+ Preconditions.checkArgument(walEdit != null && !walEdit.isEmpty(),
+ "WALEdit is null or empty!");
+ Preconditions.checkArgument(!walEdit.isReplay() || origLogSeqNum != WALKey.NO_SEQUENCE_ID,
"Invalid replay sequence Id for replay WALEdit!");
+ // Using default cluster id, as this can only happen in the originating cluster.
+ // A slave cluster receives the final value (not the delta) as a Put. We use HLogKey
+ // here instead of WALKey directly to support legacy coprocessors.
+ WALKey walKey = walEdit.isReplay() ? new WALKey(this.getRegionInfo().getEncodedNameAsBytes(),
+ this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, clusterIds, nonceGroup,
+ nonce, mvcc) :
+ new WALKey(this.getRegionInfo().getEncodedNameAsBytes(),
+ this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, clusterIds,
+ nonceGroup, nonce, mvcc, this.getReplicationScope());
+ if (walEdit.isReplay()) {
+ walKey.setOrigLogSeqNum(origLogSeqNum);
+ }
WriteEntry writeEntry = null;
- if (!walEdit.isEmpty()) {
- // Using default cluster id, as this can only happen in the originating cluster.
- // A slave cluster receives the final value (not the delta) as a Put. We use HLogKey
- // here instead of WALKey directly to support legacy coprocessors.
- WALKey walKey = walEdit.isReplay() ? new WALKey(this.getRegionInfo().getEncodedNameAsBytes(),
- this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, clusterIds, nonceGroup,
- nonce, mvcc) :
- new WALKey(this.getRegionInfo().getEncodedNameAsBytes(),
- this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, clusterIds,
- nonceGroup, nonce, mvcc, this.getReplicationScope());
- if (walEdit.isReplay()) {
- walKey.setOrigLogSeqNum(replaySeqId);
+ try {
+ long txid = this.wal.append(this.getRegionInfo(), walKey, walEdit, true);
+ // Call sync on our edit.
+ if (txid != 0) {
+ sync(txid, durability);
}
- try {
- long txid = this.wal.append(this.getRegionInfo(), walKey, walEdit, true);
- // Call sync on our edit.
- if (txid != 0) sync(txid, durability);
- writeEntry = walKey.getWriteEntry();
- } catch (IOException ioe) {
- if (walKey != null) mvcc.complete(walKey.getWriteEntry());
- throw ioe;
+ writeEntry = walKey.getWriteEntry();
+ } catch (IOException ioe) {
+ if (walKey != null) {
+ mvcc.complete(walKey.getWriteEntry());
}
+ throw ioe;
}
return writeEntry;
}
@@ -7637,7 +7823,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* @return Sorted list of <code>cells</code> using <code>comparator</code>
*/
private static List<Cell> sort(List<Cell> cells, final CellComparator comparator) {
- Collections.sort(cells, comparator);
+ cells.sort(comparator);
return cells;
}
@@ -7658,7 +7844,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
ClassSize.OBJECT +
ClassSize.ARRAY +
51 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
- (15 * Bytes.SIZEOF_LONG) +
+ (14 * Bytes.SIZEOF_LONG) +
6 * Bytes.SIZEOF_BOOLEAN);
// woefully out of date - currently missing:
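Taken together, the HRegion changes above amount to a template-method refactor: batchMutate() and doMiniBatchMutate() now drive a fixed step sequence (lock rows and build the mini-batch, prepare timestamps, build WAL edits, append and sync, write the memstore, complete the mini-batch and mvcc), while all live-versus-replay branching moves into the BatchOperation subclasses. A condensed, hypothetical sketch of that shape, deliberately not the real HBase classes:

import java.io.IOException;

// Condensed sketch of the refactor's shape; names echo the patch but this is not HBase code.
abstract class BatchSketch {
  // Stands in for batchMutate()/doMiniBatchMutate(): a fixed driver sequence whose
  // variable steps are supplied by the live and replay subclasses.
  void run() throws IOException {
    startRegionOperation();
    try {
      checkAndPrepare();      // validation; the live path also runs CP pre-hooks here
      System.out.println("lock rows, build mini-batch, append WAL edits, write memstore");
      complete();             // mvcc completion differs per subclass
    } finally {
      closeRegionOperation();
    }
  }
  abstract void startRegionOperation() throws IOException;
  abstract void closeRegionOperation() throws IOException;
  abstract void checkAndPrepare() throws IOException;
  abstract void complete();
}

class LiveSketch extends BatchSketch {
  @Override void startRegionOperation() { System.out.println("start BATCH_MUTATE"); }
  @Override void closeRegionOperation() { System.out.println("close BATCH_MUTATE"); }
  @Override void checkAndPrepare() { System.out.println("check families, run prePut/preDelete"); }
  @Override void complete() { System.out.println("completeAndWait on the mvcc write entry"); }
}

class ReplaySketch extends BatchSketch {
  private final long origLogSeqNum;
  ReplaySketch(long origLogSeqNum) { this.origLogSeqNum = origLogSeqNum; }
  @Override void startRegionOperation() { System.out.println("start REPLAY_BATCH_MUTATE"); }
  @Override void closeRegionOperation() { System.out.println("close REPLAY_BATCH_MUTATE"); }
  @Override void checkAndPrepare() { System.out.println("drop families removed since the crash"); }
  @Override void complete() { System.out.println("advance mvcc to " + origLogSeqNum); }

  public static void main(String[] args) throws IOException {
    new ReplaySketch(42L).run();
  }
}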
http://git-wip-us.apache.org/repos/asf/hbase/blob/4eae5a29/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
index 56a97e0..ba847a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.wal.WALEdit;
@@ -40,13 +41,22 @@ public class MiniBatchOperationInProgress<T> {
private final int firstIndex;
private final int lastIndexExclusive;
+ private int readyToWriteCount = 0;
+ private int cellCount = 0;
+ private int numOfPuts = 0;
+ private int numOfDeletes = 0;
+
public MiniBatchOperationInProgress(T[] operations, OperationStatus[] retCodeDetails,
- WALEdit[] walEditsFromCoprocessors, int firstIndex, int lastIndexExclusive) {
+ WALEdit[] walEditsFromCoprocessors, int firstIndex, int lastIndexExclusive,
+ int readyToWriteCount) {
+ Preconditions.checkArgument(readyToWriteCount <= (lastIndexExclusive - firstIndex));
this.operations = operations;
this.retCodeDetails = retCodeDetails;
this.walEditsFromCoprocessors = walEditsFromCoprocessors;
this.firstIndex = firstIndex;
this.lastIndexExclusive = lastIndexExclusive;
+ this.readyToWriteCount = readyToWriteCount;
}
/**
@@ -127,4 +137,36 @@ public class MiniBatchOperationInProgress<T> {
return operationsFromCoprocessors == null ? null :
operationsFromCoprocessors[getAbsoluteIndex(index)];
}
+
+ public int getReadyToWriteCount() {
+ return readyToWriteCount;
+ }
+
+ public int getLastIndexExclusive() {
+ return lastIndexExclusive;
+ }
+
+ public int getCellCount() {
+ return cellCount;
+ }
+
+ public void addCellCount(int cellCount) {
+ this.cellCount += cellCount;
+ }
+
+ public int getNumOfPuts() {
+ return numOfPuts;
+ }
+
+ public void incrementNumOfPuts() {
+ this.numOfPuts += 1;
+ }
+
+ public int getNumOfDeletes() {
+ return numOfDeletes;
+ }
+
+ public void incrementNumOfDeletes() {
+ this.numOfDeletes += 1;
+ }
}
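A small, hypothetical illustration of the widened constructor and the new counters; the Preconditions check above rejects any readyToWriteCount larger than the [firstIndex, lastIndexExclusive) window, and the counters replace bookkeeping that doMiniBatchMutate() previously kept in local variables (MiniBatchSketch is an illustrative class name, not part of the patch):

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.OperationStatus;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

public class MiniBatchSketch {
  public static void main(String[] args) {
    Mutation[] ops = { new Put(Bytes.toBytes("row1")), new Put(Bytes.toBytes("row2")) };
    OperationStatus[] statuses = new OperationStatus[ops.length];
    WALEdit[] cpEdits = new WALEdit[ops.length];
    // Window [0, 2) with both rows successfully locked, hence readyToWriteCount == 2;
    // the constructor rejects a count larger than the window.
    MiniBatchOperationInProgress<Mutation> mb =
        new MiniBatchOperationInProgress<>(ops, statuses, cpEdits, 0, 2, 2);
    mb.incrementNumOfPuts();
    mb.addCellCount(1);
    System.out.println(mb.getReadyToWriteCount() + " ready, " + mb.getNumOfPuts() + " put(s)");
  }
}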
http://git-wip-us.apache.org/repos/asf/hbase/blob/4eae5a29/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
index c8e9940..0d9d149 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
@@ -137,7 +137,7 @@ MultiRowMutationProcessorResponse> {
if (coprocessorHost != null) {
miniBatch = new MiniBatchOperationInProgress<>(
mutations.toArray(new Mutation[mutations.size()]), opStatus, walEditsFromCP, 0,
- mutations.size());
+ mutations.size(), mutations.size());
coprocessorHost.preBatchMutate(miniBatch);
}
// Apply edits to a single WALEdit
http://git-wip-us.apache.org/repos/asf/hbase/blob/4eae5a29/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java
index 4a59379..c3472b5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java
@@ -44,7 +44,7 @@ public class TestMiniBatchOperationInProgress {
}
MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatch =
new MiniBatchOperationInProgress<>(operations, retCodeDetails,
- walEditsFromCoprocessors, 0, 5);
+ walEditsFromCoprocessors, 0, 5, 5);
assertEquals(5, miniBatch.size());
assertTrue(Bytes.equals(Bytes.toBytes(0), miniBatch.getOperation(0).getFirst().getRow()));
@@ -69,7 +69,7 @@ public class TestMiniBatchOperationInProgress {
}
miniBatch = new MiniBatchOperationInProgress<>(operations,
- retCodeDetails, walEditsFromCoprocessors, 7, 10);
+ retCodeDetails, walEditsFromCoprocessors, 7, 10, 3);
try {
miniBatch.setWalEdit(-1, new WALEdit());
fail("Should throw Exception while accessing out of range");
http://git-wip-us.apache.org/repos/asf/hbase/blob/4eae5a29/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java
index eb336fe..2fd3909 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java
@@ -867,7 +867,7 @@ public class TestWithDisabledAuthorization extends SecureTestUtil {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preBatchMutate(ObserverContextImpl.createAndPrepare(RCP_ENV),
- new MiniBatchOperationInProgress<>(null, null, null, 0, 0));
+ new MiniBatchOperationInProgress<>(null, null, null, 0, 0, 0));
return null;
}
}, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE);
[15/15] hbase git commit: HBASE-19189 Ad-hoc test job for running a
subset of tests lots of times
Posted by bu...@apache.org.
HBASE-19189 Ad-hoc test job for running a subset of tests lots of times
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5339d25b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5339d25b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5339d25b
Branch: refs/heads/HBASE-19189
Commit: 5339d25b4b7c3f8f136ba8c72c299598eff87f4b
Parents: 4eae5a2
Author: Sean Busbey <bu...@apache.org>
Authored: Mon Nov 6 13:48:05 2017 -0600
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Nov 7 16:46:58 2017 -0600
----------------------------------------------------------------------
dev-support/adhoc_run_tests/Jenkinsfile | 93 +++++++++++++++++++++
dev-support/adhoc_run_tests/adhoc_run_tests.sh | 90 ++++++++++++++++++++
dev-support/gather_machine_environment.sh | 50 +++++++++++
3 files changed, 233 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/5339d25b/dev-support/adhoc_run_tests/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/adhoc_run_tests/Jenkinsfile b/dev-support/adhoc_run_tests/Jenkinsfile
new file mode 100644
index 0000000..b3793cd
--- /dev/null
+++ b/dev-support/adhoc_run_tests/Jenkinsfile
@@ -0,0 +1,93 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+pipeline {
+ parameters {
+ string(name: 'tests', description: 'space separated list of tests to run. e.g. ' +
+ 'TestLogRollingNoCluster TestMetricRegistryImpl TestConstraints')
+ string(name: 'node', defaultValue: 'Hadoop',
+ description: 'the node label that should be used to run the test.')
+ string(name: 'repeat_count', defaultValue: '100',
+ description: 'number of iterations to run looking for a failure.')
+ string(name: 'fork_count', defaultValue: '0.5C', description: '''
+ Given to surefire to set the number of parallel forks for a given test attempt (i.e. one
+ maven invocation that has all of the specified tests). The default tries to use half of
+ the available cores on the system.
+
+ For more information see
+ <a href="http://maven.apache.org/surefire/maven-surefire-plugin/test-mojo.html#forkCount">
+ the surefire docs on the forkCount parameter</a>
+''')
+ }
+ agent {
+ node {
+ label "${params.node}"
+ }
+ }
+ options {
+ timeout (time: 6, unit: 'HOURS')
+ timestamps()
+ }
+ environment {
+ // where we check out to across stages
+ BASEDIR = "${env.WORKSPACE}/component"
+ OUTPUT_RELATIVE = 'output'
+ OUTPUTDIR = "${env.WORKSPACE}/output"
+ BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+ }
+ stages {
+ stage ('run tests') {
+ tools {
+ maven 'Maven (latest)'
+ // this needs to be set to the jdk that ought to be used to build releases on the branch
+ // the Jenkinsfile is stored in.
+ jdk "JDK 1.8 (latest)"
+ }
+ steps {
+ sh """#!/bin/bash -e
+ echo "Setting up directories"
+ rm -rf "${env.OUTPUTDIR}" && mkdir "${env.OUTPUTDIR}"
+ rm -rf ".m2-repo" && mkdir ".m2-repo"
+ mkdir "${env.OUTPUTDIR}/machine"
+"""
+ sh """#!/bin/bash -e
+ "${env.BASEDIR}/dev-support/gather_machine_environment.sh" \
+ "${OUTPUT_RELATIVE}/machine"
+"""
+ dir ("component") {
+ sh '''#!/bin/bash -e
+ ./dev-support/adhoc_run_tests/adhoc_run_tests.sh \
+ --force-timeout 1800 \
+ --maven-local-repo ".m2-repo" \
+ --log-output "${OUTPUTDIR}" \
+ --surefire-fork-count "${fork_count}" \
+ --repeat "${repeat_count}" \
+ "${tests}"
+'''
+ }
+ }
+ post {
+ always {
+ archive 'output/*'
+ archive 'output/**/*'
+ }
+ failure {
+ archive 'component/**/target/surefire-reports/*'
+ }
+ }
+ }
+ }
+}
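Note how the pipeline parameters map onto the script invocation in the 'run tests' stage: repeat_count becomes --repeat, fork_count becomes --surefire-fork-count, the hard-coded --force-timeout 1800 caps each individual surefire run, and the pipeline-level 6-hour timeout bounds the whole loop.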
http://git-wip-us.apache.org/repos/asf/hbase/blob/5339d25b/dev-support/adhoc_run_tests/adhoc_run_tests.sh
----------------------------------------------------------------------
diff --git a/dev-support/adhoc_run_tests/adhoc_run_tests.sh b/dev-support/adhoc_run_tests/adhoc_run_tests.sh
new file mode 100755
index 0000000..f0e7f8f
--- /dev/null
+++ b/dev-support/adhoc_run_tests/adhoc_run_tests.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -e
+function usage {
+ echo "Usage: ${0} [options] TestSomeTestName [TestOtherTest...]"
+ echo ""
+ echo " --repeat times number of times to repeat if successful"
+ echo " --force-timeout seconds Seconds to wait before killing a given test run."
+ echo " --maven-local-repo /path/to/use Path for maven artifacts while building"
+ echo " --surefire-fork-count set the fork-count. only useful if multiple " \
+ "tests running (default 0.5C)"
+ echo " --log-output /path/to/use path to directory to hold attempt log"
+ exit 1
+}
+# Get arguments
+declare -i force_timeout=7200
+declare fork_count="0.5C"
+declare -i attempts=1
+declare maven_repo="${HOME}/.m2/repository"
+declare output="."
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ --force-timeout) shift; force_timeout=$1; shift;;
+ --maven-local-repo) shift; maven_repo=$1; shift;;
+ --repeat) shift; attempts=$1; shift;;
+ --log-output) shift; output=$1; shift;;
+ --surefire-fork-count) shift; fork_count=$1; shift;;
+ --) shift; break;;
+ -*) usage ;;
+ *) break;; # terminate while loop
+ esac
+done
+
+if [ "$#" -lt 1 ]; then
+ usage
+fi
+
+function find_module
+{
+ declare testname=$1
+ declare path
+ path=$(find . -name "${testname}.java")
+ while [ -n "${path}" ]; do
+ path=$(dirname "${path}")
+ if [ -f "${path}/pom.xml" ]; then
+ basename "${path}"
+ break
+ fi
+ done
+}
+
+declare -a modules
+
+for test in "${@}"; do
+ module=$(find_module "${test}")
+ if [[ ! "${modules[*]}" =~ ${module} ]]; then
+ modules+=(${module})
+ fi
+done
+
+declare -a mvn_module_arg
+
+for module in "${modules[@]}"; do
+ mvn_module_arg+=(-pl "${module}")
+done
+declare tests="${*}"
+for attempt in $(seq "${attempts}"); do
+ echo "Attempt ${attempt}" >&2
+ mvn --batch-mode -Dmaven.repo.local="${maven_repo}" -Dtest="${tests// /,}" \
+ -Dsurefire.rerunFailingTestsCount=0 -Dsurefire.parallel.forcedTimeout="${force_timeout}" \
+ -Dsurefire.shutdown=kill -DtrimStackTrace=false -am "${mvn_module_arg[@]}" \
+ -DforkCount="${fork_count}" package >"${output}/mvn_test.log" 2>&1
+done
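The script also works outside Jenkins from a full checkout; an illustrative invocation, with example flag values only, would be:

./dev-support/adhoc_run_tests/adhoc_run_tests.sh --repeat 50 --force-timeout 1800 \
    --log-output /tmp/adhoc TestLogRollingNoCluster TestConstraints

Because of set -e the loop stops at the first failing attempt, and since each attempt truncates mvn_test.log in the --log-output directory, the surviving log belongs to the failed (or final) run.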
http://git-wip-us.apache.org/repos/asf/hbase/blob/5339d25b/dev-support/gather_machine_environment.sh
----------------------------------------------------------------------
diff --git a/dev-support/gather_machine_environment.sh b/dev-support/gather_machine_environment.sh
new file mode 100755
index 0000000..8f2c74e
--- /dev/null
+++ b/dev-support/gather_machine_environment.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -e
+function usage {
+ echo "Usage: ${0} /path/for/output/dir"
+ echo ""
+ echo " Gather info about a build machine that test harnesses should poll before running."
+ echo " presumes you'll then archive the passed output dir."
+
+ exit 1
+}
+
+if [ "$#" -lt 1 ]; then
+ usage
+fi
+
+
+declare output=$1
+
+echo "getting machine specs, find in ${BUILD_URL}/artifact/${output}/"
+echo "JAVA_HOME: ${JAVA_HOME}" >"${output}/java_home" 2>&1 || true
+ls -l "${JAVA_HOME}" >"${output}/java_home_ls" 2>&1 || true
+echo "MAVEN_HOME: ${MAVEN_HOME}" >"${output}/mvn_home" 2>&1 || true
+mvn --offline --version >"${output}/mvn_version" 2>&1 || true
+cat /proc/cpuinfo >"${output}/cpuinfo" 2>&1 || true
+cat /proc/meminfo >"${output}/meminfo" 2>&1 || true
+cat /proc/diskstats >"${output}/diskstats" 2>&1 || true
+cat /sys/block/sda/stat >"${output}/sys-block-sda-stat" 2>&1 || true
+df -h >"${output}/df-h" 2>&1 || true
+ps -Aww >"${output}/ps-Aww" 2>&1 || true
+ifconfig -a >"${output}/ifconfig-a" 2>&1 || true
+lsblk -ta >"${output}/lsblk-ta" 2>&1 || true
+lsblk -fa >"${output}/lsblk-fa" 2>&1 || true
+ulimit -l >"${output}/ulimit-l" 2>&1 || true
[10/15] hbase git commit: HBASE-19103 Add BigDecimalComparator for
filter
Posted by bu...@apache.org.
HBASE-19103 Add BigDecimalComparator for filter
Signed-off-by: Jan Hentschel <ja...@ultratendency.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0356674c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0356674c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0356674c
Branch: refs/heads/HBASE-19189
Commit: 0356674cd1f59b10ab515058efa948e556fbc79e
Parents: d1b6d8c
Author: QilinCao <ca...@zte.com.cn>
Authored: Mon Oct 30 20:55:11 2017 +0800
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Nov 7 08:07:58 2017 +0100
----------------------------------------------------------------------
.../hbase/filter/BigDecimalComparator.java | 116 ++++++++++++++++++
.../src/main/protobuf/Comparator.proto | 4 +
.../src/main/protobuf/Comparator.proto | 4 +
.../hbase/filter/TestBigDecimalComparator.java | 118 +++++++++++++++++++
.../filter/TestComparatorSerialization.java | 9 ++
.../hadoop/hbase/regionserver/TestHRegion.java | 43 +++++++
6 files changed, 294 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0356674c/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java
new file mode 100644
index 0000000..5da366f
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java
@@ -0,0 +1,116 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.filter;
+
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos;
+import org.apache.hadoop.hbase.util.ByteBufferUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A BigDecimal comparator which numerically compares against the specified byte array
+ */
+@InterfaceAudience.Public
+public class BigDecimalComparator extends ByteArrayComparable {
+ private BigDecimal bigDecimal;
+
+ public BigDecimalComparator(BigDecimal value) {
+ super(Bytes.toBytes(value));
+ this.bigDecimal = value;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof BigDecimalComparator)) { // instanceof already rejects null
+ return false;
+ }
+ if (this == obj) {
+ return true;
+ }
+ BigDecimalComparator bdc = (BigDecimalComparator) obj;
+ return this.bigDecimal.equals(bdc.bigDecimal);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(this.bigDecimal);
+ }
+
+ @Override
+ public int compareTo(byte[] value, int offset, int length) {
+ BigDecimal that = Bytes.toBigDecimal(value, offset, length);
+ return this.bigDecimal.compareTo(that);
+ }
+
+ @Override
+ public int compareTo(ByteBuffer value, int offset, int length) {
+ BigDecimal that = ByteBufferUtils.toBigDecimal(value, offset, length);
+ return this.bigDecimal.compareTo(that);
+ }
+
+ /**
+ * @return The comparator serialized using pb
+ */
+ @Override
+ public byte[] toByteArray() {
+ ComparatorProtos.BigDecimalComparator.Builder builder =
+ ComparatorProtos.BigDecimalComparator.newBuilder();
+ builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
+ return builder.build().toByteArray();
+ }
+
+ /**
+ * @param pbBytes A pb serialized {@link BigDecimalComparator} instance
+ * @return An instance of {@link BigDecimalComparator} made from <code>bytes</code>
+ * @throws DeserializationException A deserialization exception
+ * @see #toByteArray
+ */
+ public static BigDecimalComparator parseFrom(final byte[] pbBytes)
+ throws DeserializationException {
+ ComparatorProtos.BigDecimalComparator proto;
+ try {
+ proto = ComparatorProtos.BigDecimalComparator.parseFrom(pbBytes);
+ } catch (InvalidProtocolBufferException e) {
+ throw new DeserializationException(e);
+ }
+ return new BigDecimalComparator(Bytes.toBigDecimal(proto.getComparable().getValue()
+ .toByteArray()));
+ }
+
+ /**
+ * @param other the other comparator
+ * @return true if and only if the fields of the comparator that are serialized are equal to the
+ * corresponding fields in other. Used for testing.
+ */
+ boolean areSerializedFieldsEqual(BigDecimalComparator other) {
+ if (other == this) {
+ return true;
+ }
+ return super.areSerializedFieldsEqual(other);
+ }
+}
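
For context beyond the diff, here is a minimal client-side sketch of the new comparator paired with SingleColumnValueFilter; the column coordinates and threshold value are illustrative assumptions, not part of the commit:

  import java.math.BigDecimal;
  import org.apache.hadoop.hbase.CompareOperator;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.BigDecimalComparator;
  import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  // Hypothetical schema: family "fam1", qualifier "price".
  byte[] family = Bytes.toBytes("fam1");
  byte[] qualifier = Bytes.toBytes("price");

  // Select rows whose "price" cell equals 100.50 numerically; because the
  // comparator goes through BigDecimal.compareTo, 100.5 and 100.50 match alike.
  Scan scan = new Scan();
  scan.setFilter(new SingleColumnValueFilter(family, qualifier,
      CompareOperator.EQUAL, new BigDecimalComparator(new BigDecimal("100.50"))));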
http://git-wip-us.apache.org/repos/asf/hbase/blob/0356674c/hbase-protocol-shaded/src/main/protobuf/Comparator.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Comparator.proto b/hbase-protocol-shaded/src/main/protobuf/Comparator.proto
index 822fd2b..55253aa 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Comparator.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Comparator.proto
@@ -73,3 +73,7 @@ message RegexStringComparator {
message SubstringComparator {
required string substr = 1;
}
+
+message BigDecimalComparator {
+ required ByteArrayComparable comparable = 1;
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0356674c/hbase-protocol/src/main/protobuf/Comparator.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Comparator.proto b/hbase-protocol/src/main/protobuf/Comparator.proto
index 496b68d..878a179 100644
--- a/hbase-protocol/src/main/protobuf/Comparator.proto
+++ b/hbase-protocol/src/main/protobuf/Comparator.proto
@@ -72,3 +72,7 @@ message RegexStringComparator {
message SubstringComparator {
required string substr = 1;
}
+
+message BigDecimalComparator {
+ required ByteArrayComparable comparable = 1;
+}
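
A note on the payload the new message wraps: the comparator serializes Bytes.toBytes(BigDecimal), and runtime comparisons go through BigDecimal.compareTo, which ignores scale; the comparator's equals(), by contrast, uses BigDecimal.equals and is scale-sensitive. A small round-trip sketch with illustrative values:

  import java.math.BigDecimal;
  import org.apache.hadoop.hbase.util.Bytes;

  BigDecimal original = new BigDecimal("2.50");
  byte[] encoded = Bytes.toBytes(original);          // scale, then unscaled value
  BigDecimal decoded = Bytes.toBigDecimal(encoded);  // round-trips exactly

  // compareTo is scale-insensitive; equals is not.
  assert new BigDecimal("2.5").compareTo(new BigDecimal("2.50")) == 0;
  assert !new BigDecimal("2.5").equals(new BigDecimal("2.50"));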
http://git-wip-us.apache.org/repos/asf/hbase/blob/0356674c/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBigDecimalComparator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBigDecimalComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBigDecimalComparator.java
new file mode 100644
index 0000000..76a9721
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBigDecimalComparator.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.filter;
+
+import java.math.BigDecimal;
+
+import org.apache.hadoop.hbase.testclassification.FilterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ FilterTests.class, SmallTests.class })
+public class TestBigDecimalComparator {
+
+ @Test
+ public void testObjectEquals() {
+ BigDecimal bd = new BigDecimal(Double.MIN_VALUE);
+ // Check that equals is reflexive (an instance equals itself)
+ final BigDecimalComparator bdc = new BigDecimalComparator(bd);
+ Assert.assertTrue(bdc.equals(bdc));
+ Assert.assertEquals(bdc.hashCode(), bdc.hashCode());
+
+ // Check that equals returns true for two instances with the same value
+ final BigDecimalComparator bdc1 = new BigDecimalComparator(bd);
+ final BigDecimalComparator bdc2 = new BigDecimalComparator(bd);
+ Assert.assertTrue(bdc1.equals(bdc2));
+ Assert.assertEquals(bdc1.hashCode(), bdc2.hashCode());
+
+ // Check that equals returns false for instances with different values
+ final BigDecimalComparator bdc3 = new BigDecimalComparator(bd);
+ final BigDecimalComparator bdc4 = new BigDecimalComparator(new BigDecimal(Long.MIN_VALUE));
+ Assert.assertFalse(bdc3.equals(bdc4));
+ Assert.assertNotEquals(bdc3.hashCode(), bdc4.hashCode());
+
+ // Check that equals returns false for a different type
+ final BigDecimalComparator bdc5 = new BigDecimalComparator(bd);
+ Assert.assertFalse(bdc5.equals(0));
+ }
+
+ @Test
+ public void testEqualsValue() {
+ // given
+ BigDecimal bd1 = new BigDecimal(Double.MAX_VALUE);
+ BigDecimal bd2 = new BigDecimal(Double.MIN_VALUE);
+ byte[] value1 = Bytes.toBytes(bd1);
+ byte[] value2 = Bytes.toBytes(bd2);
+ BigDecimalComparator comparator1 = new BigDecimalComparator(bd1);
+ BigDecimalComparator comparator2 = new BigDecimalComparator(bd2);
+
+ // when
+ int comp1 = comparator1.compareTo(value1);
+ int comp2 = comparator2.compareTo(value2);
+
+ // then
+ Assert.assertEquals(0, comp1);
+ Assert.assertEquals(0, comp2);
+ }
+
+ @Test
+ public void testGreaterThanValue() {
+ // given
+ byte[] val1 = Bytes.toBytes(new BigDecimal("1000000000000000000000000000000.9999999999999999"));
+ byte[] val2 = Bytes.toBytes(new BigDecimal(0));
+ byte[] val3 = Bytes.toBytes(new BigDecimal(Double.MIN_VALUE));
+ BigDecimal bd = new BigDecimal(Double.MAX_VALUE);
+ BigDecimalComparator comparator = new BigDecimalComparator(bd);
+
+ // when
+ int comp1 = comparator.compareTo(val1);
+ int comp2 = comparator.compareTo(val2);
+ int comp3 = comparator.compareTo(val3);
+
+ // then
+ Assert.assertEquals(1, comp1);
+ Assert.assertEquals(1, comp2);
+ Assert.assertEquals(1, comp3);
+ }
+
+ @Test
+ public void testLessThanValue() {
+ // given
+ byte[] val1 = Bytes.toBytes(new BigDecimal("-1000000000000000000000000000000"));
+ byte[] val2 = Bytes.toBytes(new BigDecimal(0));
+ byte[] val3 = Bytes.toBytes(new BigDecimal(1));
+ BigDecimal bd = new BigDecimal("-1000000000000000000000000000000.0000000000000001");
+ BigDecimalComparator comparator = new BigDecimalComparator(bd);
+
+ // when
+ int comp1 = comparator.compareTo(val1);
+ int comp2 = comparator.compareTo(val2);
+ int comp3 = comparator.compareTo(val3);
+
+ // then
+ Assert.assertEquals(-1, comp1);
+ Assert.assertEquals(-1, comp2);
+ Assert.assertEquals(-1, comp3);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0356674c/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java
index 2befef0..fef699b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.filter;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import java.math.BigDecimal;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -86,4 +87,12 @@ public class TestComparatorSerialization {
ProtobufUtil.toComparator(ProtobufUtil.toComparator(substringComparator))));
}
+ @Test
+ public void testBigDecimalComparator() throws Exception {
+ BigDecimal bigDecimal = new BigDecimal(Double.MIN_VALUE);
+ BigDecimalComparator bigDecimalComparator = new BigDecimalComparator(bigDecimal);
+ assertTrue(bigDecimalComparator.areSerializedFieldsEqual(ProtobufUtil.toComparator(ProtobufUtil
+ .toComparator(bigDecimalComparator))));
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0356674c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 2e3edf1..fec7151 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -42,6 +42,7 @@ import static org.mockito.Mockito.when;
import java.io.IOException;
import java.io.InterruptedIOException;
+import java.math.BigDecimal;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
@@ -113,6 +114,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
+import org.apache.hadoop.hbase.filter.BigDecimalComparator;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -1783,6 +1785,8 @@ public class TestHRegion {
byte[] qf1 = Bytes.toBytes("qualifier");
byte[] val1 = Bytes.toBytes("value1");
byte[] val2 = Bytes.toBytes("value2");
+ BigDecimal bd1 = new BigDecimal(Double.MAX_VALUE);
+ BigDecimal bd2 = new BigDecimal(Double.MIN_VALUE);
// Setting up region
this.region = initHRegion(tableName, method, CONF, fam1);
@@ -1803,6 +1807,25 @@ public class TestHRegion {
res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BinaryComparator(val2),
put, true);
assertEquals(false, res);
+
+ // Putting BigDecimal data in the row
+ put = new Put(row1);
+ put.addColumn(fam1, qf1, Bytes.toBytes(bd1));
+ region.put(put);
+
+ // checkAndPut with wrong value
+ res =
+ region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BigDecimalComparator(
+ bd2), put, true);
+ assertEquals(false, res);
+
+ // checkAndDelete with wrong value
+ delete = new Delete(row1);
+ delete.addFamily(fam1);
+ res =
+ region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BigDecimalComparator(
+ bd2), delete, true);
+ assertEquals(false, res);
} finally {
HBaseTestingUtility.closeRegionAndWAL(this.region);
this.region = null;
@@ -1815,6 +1838,7 @@ public class TestHRegion {
byte[] fam1 = Bytes.toBytes("fam1");
byte[] qf1 = Bytes.toBytes("qualifier");
byte[] val1 = Bytes.toBytes("value1");
+ BigDecimal bd1 = new BigDecimal(Double.MIN_VALUE);
// Setting up region
this.region = initHRegion(tableName, method, CONF, fam1);
@@ -1835,6 +1859,25 @@ public class TestHRegion {
res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BinaryComparator(val1),
delete, true);
assertEquals(true, res);
+
+ // Putting BigDecimal data in the row
+ put = new Put(row1);
+ put.addColumn(fam1, qf1, Bytes.toBytes(bd1));
+ region.put(put);
+
+ // checkAndPut with correct value
+ res =
+ region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BigDecimalComparator(
+ bd1), put, true);
+ assertEquals(true, res);
+
+ // checkAndDelete with correct value
+ delete = new Delete(row1);
+ delete.addColumn(fam1, qf1);
+ res =
+ region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BigDecimalComparator(
+ bd1), delete, true);
+ assertEquals(true, res);
} finally {
HBaseTestingUtility.closeRegionAndWAL(this.region);
this.region = null;
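
For contrast with the Region-level checkAndMutate exercised above, a minimal client-side sketch; Table#checkAndPut compares raw cell bytes for equality, so a value re-encoded at a different scale would not match, which is the gap the numeric comparator closes at the Region level. The table handle and the reuse of row1/fam1/qf1 are assumptions carried over from the test:

  import java.math.BigDecimal;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  // Assumes 'table', row1, fam1 and qf1 are set up as in the test above.
  BigDecimal expected = new BigDecimal(Double.MAX_VALUE);
  Put update = new Put(row1);
  update.addColumn(fam1, qf1, Bytes.toBytes(new BigDecimal(Double.MIN_VALUE)));
  // Applies the put only if the current cell bytes equal the expected encoding.
  boolean applied = table.checkAndPut(row1, fam1, qf1, Bytes.toBytes(expected), update);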
[08/15] hbase git commit: HBASE-19186 Unify to use bytes to show size
in master/rs ui
Posted by bu...@apache.org.
HBASE-19186 Unify to use bytes to show size in master/rs ui
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6011a16
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6011a16
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6011a16
Branch: refs/heads/HBASE-19189
Commit: b6011a16fffebae21e56c41206b29d96c0613024
Parents: 2a99b87
Author: Guanghao Zhang <zg...@apache.org>
Authored: Sun Nov 5 12:41:02 2017 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Tue Nov 7 10:07:03 2017 +0800
----------------------------------------------------------------------
.../tmpl/regionserver/BlockCacheTmpl.jamon | 4 +-
.../tmpl/regionserver/ServerMetricsTmpl.jamon | 10 +-
.../hbase-webapps/master/procedures.jsp | 9 +-
.../hbase-webapps/master/processMaster.jsp | 9 +-
.../hbase-webapps/master/processRS.jsp | 228 -------------------
.../resources/hbase-webapps/master/table.jsp | 2 +-
.../hbase-webapps/regionserver/processRS.jsp | 9 +-
7 files changed, 23 insertions(+), 248 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/b6011a16/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index b4e44d8..5ea5bcc 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -244,13 +244,13 @@ org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
<td>Size</td>
<td><% TraditionalBinaryPrefix.long2String(cacheConfig.getBlockCache().getCurrentSize(),
"B", 1) %></td>
- <td>Current size of block cache in use (bytes)</td>
+ <td>Current size of block cache in use</td>
</tr>
<tr>
<td>Free</td>
<td><% TraditionalBinaryPrefix.long2String(cacheConfig.getBlockCache().getFreeSize(),
"B", 1) %></td>
- <td>The total free memory currently available to store more cache entries (bytes)</td>
+ <td>The total free memory currently available to store more cache entries</td>
</tr>
<tr>
<td>Count</td>
http://git-wip-us.apache.org/repos/asf/hbase/blob/b6011a16/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index 2e99d5b..adcfff1 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -146,7 +146,7 @@ MetricsRegionServerWrapper mWrap;
<tr>
<tr>
<th>Num. WAL Files</th>
- <th>Size. WAL Files (bytes)</th>
+ <th>Size. WAL Files</th>
</tr>
</tr>
<tr>
@@ -165,9 +165,9 @@ MetricsRegionServerWrapper mWrap;
<th>Num. Stores</th>
<th>Num. Storefiles</th>
- <th>Root Index Size (bytes)</th>
- <th>Index Size (bytes)</th>
- <th>Bloom Size (bytes)</th>
+ <th>Root Index Size</th>
+ <th>Index Size</th>
+ <th>Bloom Size</th>
</tr>
<tr>
<td><% mWrap.getNumStores() %></td>
@@ -212,7 +212,7 @@ MetricsHBaseServerWrapper mServerWrap;
<th>Priority Call Queue Length</th>
<th>General Call Queue Length</th>
<th>Replication Call Queue Length</th>
- <th>Total Call Queue Size (bytes)</th>
+ <th>Total Call Queue Size</th>
</tr>
<tr>
http://git-wip-us.apache.org/repos/asf/hbase/blob/b6011a16/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
index 63a41cc..c3df296 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
@@ -39,6 +39,7 @@
import="org.apache.hadoop.hbase.procedure2.util.StringUtils"
import="org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos"
import="org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil"
+ import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
%>
<%
HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
@@ -173,7 +174,7 @@
<% ProcedureWALFile pwf = procedureWALFiles.get(i); %>
<tr>
<td> <%= pwf.getLogId() %></td>
- <td> <%= StringUtils.humanSize(pwf.getSize()) %> </td>
+ <td> <%= TraditionalBinaryPrefix.long2String(pwf.getSize(), "B", 1) %> </td>
<td> <%= new Date(pwf.getTimestamp()) %> </td>
<td> <%= escapeXml(pwf.toString()) %> </td>
</tr>
@@ -195,7 +196,7 @@
<% for (ProcedureWALFile cwf:corruptedWALFiles) { %>
<tr>
<td> <%= cwf.getLogId() %></td>
- <td> <%= StringUtils.humanSize(cwf.getSize()) %> </td>
+ <td> <%= TraditionalBinaryPrefix.long2String(cwf.getSize(), "B", 1) %> </td>
<td> <%= new Date(cwf.getTimestamp()) %> </td>
<td> <%= escapeXml(cwf.toString()) %> </td>
</tr>
@@ -232,8 +233,8 @@
<td> <%= new Date(syncMetrics.getTimestamp()) %></td>
<td> <%= StringUtils.humanTimeDiff(syncMetrics.getSyncWaitMs()) %></td>
<td> <%= syncMetrics.getSyncedEntries() %></td>
- <td> <%= StringUtils.humanSize(syncMetrics.getTotalSyncedBytes()) %></td>
- <td> <%= StringUtils.humanSize(syncMetrics.getSyncedPerSec()) %></td>
+ <td> <%= TraditionalBinaryPrefix.long2String(syncMetrics.getTotalSyncedBytes(), "B", 1) %></td>
+ <td> <%= TraditionalBinaryPrefix.long2String((long)syncMetrics.getSyncedPerSec(), "B", 1) %></td>
</tr>
<%} %>
</table>
http://git-wip-us.apache.org/repos/asf/hbase/blob/b6011a16/hbase-server/src/main/resources/hbase-webapps/master/processMaster.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/processMaster.jsp b/hbase-server/src/main/resources/hbase-webapps/master/processMaster.jsp
index d99e198..75da38a 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/processMaster.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/processMaster.jsp
@@ -29,6 +29,7 @@
import="java.lang.management.GarbageCollectorMXBean"
import="org.apache.hadoop.hbase.util.JSONMetricUtil"
import="org.apache.hadoop.hbase.procedure2.util.StringUtils"
+ import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
import="com.fasterxml.jackson.databind.JsonNode"
%>
<%
@@ -207,10 +208,10 @@ if(mp.getName().contains("Cache")) continue;%>
</tr>
<tr>
<tr>
- <td><%= StringUtils.humanSize(mp.getUsage().getCommitted()) %></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getInit())%></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getMax())%></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getUsed())%></a></td>
+ <td><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getCommitted(), "B", 1) %></a></td>
+ <td><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getInit(), "B", 1) %></a></td>
+ <td><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getMax(), "B", 1) %></a></td>
+ <td><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getUsed(), "B", 1) %></a></td>
<td><%= JSONMetricUtil.calcPercentage(mp.getUsage().getUsed(),
mp.getUsage().getCommitted()) %></a></td>
</tr>
http://git-wip-us.apache.org/repos/asf/hbase/blob/b6011a16/hbase-server/src/main/resources/hbase-webapps/master/processRS.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/processRS.jsp b/hbase-server/src/main/resources/hbase-webapps/master/processRS.jsp
deleted file mode 100644
index f0df0c0..0000000
--- a/hbase-server/src/main/resources/hbase-webapps/master/processRS.jsp
+++ /dev/null
@@ -1,228 +0,0 @@
-<%--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
---%>
-<%@ page contentType="text/html;charset=UTF-8"
- import="java.util.Date"
- import="java.util.List"
- import="org.apache.hadoop.hbase.HBaseConfiguration"
- import="static org.apache.commons.lang3.StringEscapeUtils.escapeXml"
- import="javax.management.ObjectName"
- import="java.lang.management.ManagementFactory"
- import="java.lang.management.MemoryPoolMXBean"
- import="java.lang.management.RuntimeMXBean"
- import="java.lang.management.GarbageCollectorMXBean"
- import="org.apache.hadoop.hbase.util.JSONMetricUtil"
- import="org.apache.hadoop.hbase.procedure2.util.StringUtils"
- import="com.fasterxml.jackson.databind.JsonNode"
-%>
-<%
-RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
-ObjectName jvmMetrics = new ObjectName("Hadoop:service=HBase,name=JvmMetrics");
-ObjectName rsMetrics = new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=Server");
-
-// There is always two of GC collectors
-List<GarbageCollectorMXBean> gcBeans = JSONMetricUtil.getGcCollectorBeans();
-GarbageCollectorMXBean collector1 = null;
-GarbageCollectorMXBean collector2 = null;
-try {
-collector1 = gcBeans.get(0);
-collector2 = gcBeans.get(1);
-} catch(IndexOutOfBoundsException e) {}
-List<MemoryPoolMXBean> mPools = JSONMetricUtil.getMemoryPools();
-%>
-<!DOCTYPE html>
-<?xml version="1.0" encoding="UTF-8" ?>
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
- <meta charset="utf-8">
- <title>Process info for PID: <%= JSONMetricUtil.getProcessPID() %></title>
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
- <meta name="description" content="">
- <meta name="author" content="">
-
- <link href="/static/css/bootstrap.min.css" rel="stylesheet">
- <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet">
- <link href="/static/css/hbase.css" rel="stylesheet">
-</head>
-<body>
-<div class="navbar navbar-fixed-top navbar-default">
- <div class="container-fluid">
- <div class="navbar-header">
- <button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
- <span class="icon-bar"></span>
- <span class="icon-bar"></span>
- <span class="icon-bar"></span>
- </button>
- <a class="navbar-brand" href="/rs-status"><img src="/static/hbase_logo_small.png" alt="HBase Logo"/></a>
- </div>
- <div class="collapse navbar-collapse">
- <ul class="nav navbar-nav">
- <li><a href="/rs-status">Home</a></li>
- <li><a href="/processRS.jsp">Process Metrics</a></li>
- <li><a href="/logs/">Local Logs</a></li>
- <li><a href="/dump">Debug Dump</a></li>
- <li><a href="/jmx">Metrics Dump</a></li>
- <% if (HBaseConfiguration.isShowConfInServlet()) { %>
- <li><a href="/conf">HBase Configuration</a></li>
- <% } %>
- </ul>
- </div><!--/.nav-collapse -->
- </div>
-</div>
-<div class="container-fluid content">
- <div class="row">
- <div class="page-header">
- <h1><%= JSONMetricUtil.getCommmand().split(" ")[0] %></h1>
- </div>
- </div>
- <table class="table table-striped" width="90%" >
- <tr>
- <th>Started</th>
- <th>Uptime</th>
- <th>PID</th>
- <th>JvmPauseMonitor Count </th>
- <th>Owner</th>
- </tr>
- <tr>
- <tr>
- <td><%= new Date(runtimeBean.getStartTime()) %></a></td>
- <td><%= StringUtils.humanTimeDiff(runtimeBean.getUptime()) %></a></td>
- <td><%= JSONMetricUtil.getProcessPID() %></a></td>
- <td><%= (long)JSONMetricUtil.getValueFromMBean(rsMetrics, "pauseWarnThresholdExceeded")
- + (long)JSONMetricUtil.getValueFromMBean(rsMetrics, "pauseInfoThresholdExceeded") %></a></td>
- <td><%= runtimeBean.getSystemProperties().get("user.name") %></a></td>
- </tr>
- </table>
-</div>
-<div class="container-fluid content">
- <div class="row">
- <div class="page-header">
- <h2>Threads</h2>
- </div>
- </div>
- <table class="table table-striped" width="90%" >
- <tr>
- <th>ThreadsNew</th>
- <th>ThreadsRunable</th>
- <th>ThreadsBlocked</th>
- <th>ThreadsWaiting</th>
- <th>ThreadsTimeWaiting</th>
- <th>ThreadsTerminated</th>
- </tr>
- <tr>
- <tr>
- <td><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsNew") %></a></td>
- <td><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsRunnable")%></a></td>
- <td><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsBlocked")%></a></td>
- <td><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsWaiting")%></a></td>
- <td><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsTimedWaiting")%></a></td>
- <td><%= JSONMetricUtil.getValueFromMBean(jvmMetrics, "ThreadsTerminated")%></a></td>
- </tr>
- </table>
-</div>
-<div class="container-fluid content">
- <div class="row">
- <div class="page-header">
- <h2>GC Collectors</h2>
- </div>
- </div>
- <% if (gcBeans.size() == 2) { %>
-<div class="tabbable">
- <ul class="nav nav-pills">
- <li class="active">
- <a href="#tab_gc1" data-toggle="tab"><%=collector1.getName() %></a>
- </li>
- <li class="">
- <a href="#tab_gc2" data-toggle="tab"><%=collector2.getName() %></a>
- </li>
- </ul>
- <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
- <div class="tab-pane active" id="tab_gc1">
- <table class="table table-striped">
- <tr>
- <th>Collection Count</th>
- <th>Collection Time</th>
- <th>Last duration</th>
- </tr>
- <tr>
- <td> <%= collector1.getCollectionCount() %></td>
- <td> <%= StringUtils.humanTimeDiff(collector1.getCollectionTime()) %> </td>
- <td> <%= StringUtils.humanTimeDiff(JSONMetricUtil.getLastGcDuration(
- collector1.getObjectName())) %></td>
- </tr>
- </table>
- </div>
- <div class="tab-pane" id="tab_gc2">
- <table class="table table-striped">
- <tr>
- <th>Collection Count</th>
- <th>Collection Time</th>
- <th>Last duration</th>
- </tr>
- <tr>
- <td> <%= collector2.getCollectionCount() %></td>
- <td> <%= StringUtils.humanTimeDiff(collector2.getCollectionTime()) %> </td>
- <td> <%= StringUtils.humanTimeDiff(JSONMetricUtil.getLastGcDuration(
- collector2.getObjectName())) %></td>
- </tr>
- </table>
- </div>
- </div>
- </div>
- <%} else { %>
- <p> Can not display GC Collector stats.</p>
- <%} %>
- Total GC Collection time: <%= StringUtils.humanTimeDiff(collector1.getCollectionTime() +
- collector2.getCollectionTime())%>
-</div>
-<% for(MemoryPoolMXBean mp:mPools) {
-if(mp.getName().contains("Cache")) continue;%>
-<div class="container-fluid content">
- <div class="row">
- <div class="page-header">
- <h2><%= mp.getName() %></h2>
- </div>
- </div>
- <table class="table table-striped" width="90%" >
- <tr>
- <th>Commited</th>
- <th>Init</th>
- <th>Max</th>
- <th>Used</th>
- <th>Utilization [%]</th>
- </tr>
- <tr>
- <tr>
- <td><%= StringUtils.humanSize(mp.getUsage().getCommitted()) %></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getInit())%></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getMax())%></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getUsed())%></a></td>
- <td><%= JSONMetricUtil.calcPercentage(mp.getUsage().getUsed(),
- mp.getUsage().getCommitted()) %></a></td>
- </tr>
- </table>
-</div>
-<% } %>
-
-<script src="/static/js/jquery.min.js" type="text/javascript"></script>
-<script src="/static/js/bootstrap.min.js" type="text/javascript"></script>
-<script src="/static/js/tab.js" type="text/javascript"></script>
-
-</body>
-</html>
http://git-wip-us.apache.org/repos/asf/hbase/blob/b6011a16/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 6856781..ef37f79 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -797,7 +797,7 @@ if (withReplica) {
<tr>
<td>Size</td>
<td><%= StringUtils.TraditionalBinaryPrefix.long2String(totalStoreFileSizeMB * 1024 * 1024, "B", 2)%></td>
- <td>Total size of store files (in bytes)</td>
+ <td>Total size of store files</td>
</tr>
</table>
http://git-wip-us.apache.org/repos/asf/hbase/blob/b6011a16/hbase-server/src/main/resources/hbase-webapps/regionserver/processRS.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/processRS.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/processRS.jsp
index f0df0c0..5c224ab 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/processRS.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/processRS.jsp
@@ -29,6 +29,7 @@
import="java.lang.management.GarbageCollectorMXBean"
import="org.apache.hadoop.hbase.util.JSONMetricUtil"
import="org.apache.hadoop.hbase.procedure2.util.StringUtils"
+ import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
import="com.fasterxml.jackson.databind.JsonNode"
%>
<%
@@ -209,10 +210,10 @@ if(mp.getName().contains("Cache")) continue;%>
</tr>
<tr>
<tr>
- <td><%= StringUtils.humanSize(mp.getUsage().getCommitted()) %></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getInit())%></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getMax())%></a></td>
- <td><%= StringUtils.humanSize(mp.getUsage().getUsed())%></a></td>
+ <td><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getCommitted(), "B", 1) %></a></td>
+ <td><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getInit(), "B", 1) %></a></td>
+ <td><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getMax(), "B", 1) %></a></td>
+ <td><%= TraditionalBinaryPrefix.long2String(mp.getUsage().getUsed(), "B", 1) %></a></td>
<td><%= JSONMetricUtil.calcPercentage(mp.getUsage().getUsed(),
mp.getUsage().getCommitted()) %></a></td>
</tr>
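
The substance of the change above is replacing StringUtils.humanSize with Hadoop's TraditionalBinaryPrefix.long2String, which renders a binary prefix plus an explicit unit, making the "(bytes)" column-header suffixes redundant. A rough sketch of the difference; the exact output strings are an assumption from the two utilities' contracts:

  import org.apache.hadoop.hbase.procedure2.util.StringUtils;
  import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

  long bytes = 3L * 1024 * 1024;
  // Old rendering: human-readable size with the unit implied by the column header.
  String before = StringUtils.humanSize(bytes);
  // New rendering: binary prefix with the "B" unit spelled out, e.g. "3.0 MB".
  String after = TraditionalBinaryPrefix.long2String(bytes, "B", 1);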
[12/15] hbase git commit: HBASE-19175 Added linklint files to
gitignore
Posted by bu...@apache.org.
HBASE-19175 Added linklint files to gitignore
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9d63bda8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9d63bda8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9d63bda8
Branch: refs/heads/HBASE-19189
Commit: 9d63bda8ff44764963ee3ed11eca3881037ff789
Parents: d4e3f90
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sat Nov 4 01:33:30 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Nov 7 08:25:48 2017 +0100
----------------------------------------------------------------------
.gitignore | 3 +++
1 file changed, 3 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/9d63bda8/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index b9c6fb2..405edc0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,6 @@ hbase-*/test
*.ipr
patchprocess/
dependency-reduced-pom.xml
+link_report/
+linklint-*.zip
+linklint/
[06/15] hbase git commit: HBASE-19131 (Addendum) Use the emptyList()
to replace EMPTY_LIST
Posted by bu...@apache.org.
HBASE-19131 (Addendum) Use the emptyList() to replace EMPTY_LIST
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/33ede551
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/33ede551
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/33ede551
Branch: refs/heads/HBASE-19189
Commit: 33ede55164421b40c0bfe1c9d47c1db6701265c2
Parents: 9ee8e27
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Tue Nov 7 04:06:00 2017 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Tue Nov 7 04:06:00 2017 +0800
----------------------------------------------------------------------
.../src/main/java/org/apache/hadoop/hbase/ClusterStatus.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/33ede551/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 13c5bac..351b0c8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -138,7 +138,7 @@ public class ClusterStatus {
*/
public List<ServerName> getDeadServerNames() {
if (deadServers == null) {
- return Collections.EMPTY_LIST;
+ return Collections.emptyList();
}
return Collections.unmodifiableList(deadServers);
}
@@ -256,7 +256,7 @@ public class ClusterStatus {
public Collection<ServerName> getServers() {
if (liveServers == null) {
- return Collections.EMPTY_LIST;
+ return Collections.emptyList();
}
return Collections.unmodifiableCollection(this.liveServers.keySet());
}
@@ -281,7 +281,7 @@ public class ClusterStatus {
*/
public List<ServerName> getBackupMasters() {
if (backupMasters == null) {
- return Collections.EMPTY_LIST;
+ return Collections.emptyList();
}
return Collections.unmodifiableList(this.backupMasters);
}
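
The motivation for this addendum: Collections.EMPTY_LIST is a raw List, so assigning it to List<ServerName> compiles only with an unchecked-conversion warning, while Collections.emptyList() lets the compiler infer the element type. A minimal illustration:

  import java.util.Collections;
  import java.util.List;
  import org.apache.hadoop.hbase.ServerName;

  @SuppressWarnings("unchecked")
  List<ServerName> raw = Collections.EMPTY_LIST;     // raw type: unchecked conversion
  List<ServerName> typed = Collections.emptyList();  // T inferred as ServerName, no warning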