Posted to commits@hbase.apache.org by bu...@apache.org on 2018/02/27 17:46:30 UTC
[01/20] hbase git commit: HBASE-20065 Addendum remove wrong comment [Forced Update!]
Repository: hbase
Updated Branches:
refs/heads/HBASE-15151 97a3a9a31 -> 2909bf1e5 (forced update)
HBASE-20065 Addendum remove wrong comment
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a8471bd9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a8471bd9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a8471bd9
Branch: refs/heads/HBASE-15151
Commit: a8471bd98736c7ee387e268415bfd3ff96d8655d
Parents: 549a6d9
Author: zhangduo <zh...@apache.org>
Authored: Mon Feb 26 09:48:41 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Mon Feb 26 09:48:41 2018 +0800
----------------------------------------------------------------------
.../src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java | 1 -
1 file changed, 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/a8471bd9/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 803f183..e779054 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1535,7 +1535,6 @@ public class MetaTableAccessor {
RegionInfo regionA, RegionInfo regionB, ServerName sn, int regionReplication)
throws IOException {
try (Table meta = getMetaHTable(connection)) {
- // use the maximum of what master passed us vs local time.
long time = EnvironmentEdgeManager.currentTime();
// Put for parent
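For context, HBase reads the clock through EnvironmentEdgeManager rather than System.currentTimeMillis() so tests can substitute a deterministic clock. A minimal sketch of that mechanism (the fixed instant below is illustrative, not from the commit):

  import org.apache.hadoop.hbase.util.EnvironmentEdge;
  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public class FixedClockSketch {
    public static void main(String[] args) {
      // Inject a fixed clock; production code keeps the default wall clock.
      EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
        @Override
        public long currentTime() {
          return 1519632000000L; // illustrative fixed instant
        }
      });
      System.out.println(EnvironmentEdgeManager.currentTime()); // the injected value
      EnvironmentEdgeManager.reset(); // restore the default edge
    }
  }

The deleted comment claimed the timestamp was the maximum of the master-supplied time and local time; the code simply takes the current EnvironmentEdgeManager time, so the stale comment was removed rather than the behavior changed.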
[12/20] hbase git commit: HBASE-20092 Fix TestRegionMetrics#testRegionMetrics
Posted by bu...@apache.org.
HBASE-20092 Fix TestRegionMetrics#testRegionMetrics
Signed-off-by: Michael Stack <st...@apache.org>
Signed-off-by: tedyu <yu...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dbd80130
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dbd80130
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dbd80130
Branch: refs/heads/HBASE-15151
Commit: dbd80130578ef2ed3ab1244090d36eab55ef32e6
Parents: f06a89b
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Tue Feb 27 16:48:17 2018 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Tue Feb 27 16:48:17 2018 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/TestRegionLoad.java | 15 ++++++----
.../apache/hadoop/hbase/TestRegionMetrics.java | 29 +++++++++++++++++---
2 files changed, 34 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/dbd80130/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
index d0484d6..a390aca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
@@ -27,6 +27,7 @@ import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.client.Admin;
@@ -34,7 +35,6 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@@ -61,12 +61,13 @@ public class TestRegionLoad {
private static final TableName TABLE_2 = TableName.valueOf("table_2");
private static final TableName TABLE_3 = TableName.valueOf("table_3");
private static final TableName[] tables = new TableName[]{TABLE_1, TABLE_2, TABLE_3};
+ private static final int MSG_INTERVAL = 500; // ms
@BeforeClass
public static void beforeClass() throws Exception {
// Make servers report eagerly. This test is about looking at the cluster status reported.
// Make it so we don't have to wait around too long to see change.
- UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 500);
+ UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", MSG_INTERVAL);
UTIL.startMiniCluster(4);
admin = UTIL.getAdmin();
admin.setBalancerRunning(false, true);
@@ -117,11 +118,13 @@ public class TestRegionLoad {
}
checkRegionsAndRegionLoads(tableRegions, regionLoads);
}
- int pause = UTIL.getConfiguration().getInt("hbase.regionserver.msginterval", 3000);
// Just wait here. If this fixes the test, come back and do a better job.
// Would have to redo the below so can wait on cluster status changing.
- Threads.sleep(2 * pause);
+ // Admin#getClusterMetrics retrieves data from the HMaster. Admin#getRegionMetrics, by
+ // contrast, gets the data from the RS. Hence, the assertion will fail if we check before
+ // the RS has made its report.
+ TimeUnit.MILLISECONDS.sleep(3 * MSG_INTERVAL);
// Check RegionLoad matches the regionLoad from ClusterStatus
ClusterStatus clusterStatus
@@ -133,10 +136,10 @@ public class TestRegionLoad {
(v1, v2) -> {
throw new RuntimeException("impossible!!");
}, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
- LOG.info("serverName=" + serverName + ", getRegionLoads=" +
+ LOG.debug("serverName=" + serverName + ", getRegionLoads=" +
serverLoad.getRegionsLoad().keySet().stream().map(r -> Bytes.toString(r)).
collect(Collectors.toList()));
- LOG.info("serverName=" + serverName + ", regionLoads=" +
+ LOG.debug("serverName=" + serverName + ", regionLoads=" +
regionLoads.keySet().stream().map(r -> Bytes.toString(r)).
collect(Collectors.toList()));
compareRegionLoads(serverLoad.getRegionsLoad(), regionLoads);
http://git-wip-us.apache.org/repos/asf/hbase/blob/dbd80130/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java
index 1f9c519..df57c49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java
@@ -25,6 +25,8 @@ import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -37,6 +39,8 @@ import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
@@ -47,6 +51,7 @@ public class TestRegionMetrics {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestRegionMetrics.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestRegionMetrics.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static Admin admin;
@@ -54,9 +59,13 @@ public class TestRegionMetrics {
private static final TableName TABLE_2 = TableName.valueOf("table_2");
private static final TableName TABLE_3 = TableName.valueOf("table_3");
private static final TableName[] tables = new TableName[] { TABLE_1, TABLE_2, TABLE_3 };
+ private static final int MSG_INTERVAL = 500; // ms
@BeforeClass
public static void beforeClass() throws Exception {
+ // Make servers report eagerly. This test is about looking at the cluster status reported.
+ // Make it so we don't have to wait around too long to see change.
+ UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", MSG_INTERVAL);
UTIL.startMiniCluster(4);
admin = UTIL.getAdmin();
admin.balancerSwitch(false, true);
@@ -101,13 +110,25 @@ public class TestRegionMetrics {
checkRegionsAndRegionMetrics(tableRegions, regionMetrics);
}
- // Check RegionMetrics matches the RegionMetrics from ClusterStatus
- ClusterMetrics clusterStatus = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
- for (Map.Entry<ServerName, ServerMetrics> entry : clusterStatus.getLiveServerMetrics()
- .entrySet()) {
+ // Just wait here. If this fixes the test, come back and do a better job.
+ // Would have to redo the below so can wait on cluster status changing.
+ // Admin#getClusterMetrics retrieves data from the HMaster. Admin#getRegionMetrics, by
+ // contrast, gets the data from the RS. Hence, the assertion will fail if we check before
+ // the RS has made its report.
+ TimeUnit.MILLISECONDS.sleep(3 * MSG_INTERVAL);
+
+ // Check RegionMetrics matches the RegionMetrics from ClusterMetrics
+ for (Map.Entry<ServerName, ServerMetrics> entry : admin
+ .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().entrySet()) {
ServerName serverName = entry.getKey();
ServerMetrics serverMetrics = entry.getValue();
List<RegionMetrics> regionMetrics = admin.getRegionMetrics(serverName);
+ LOG.debug("serverName=" + serverName + ", getRegionLoads=" +
+ serverMetrics.getRegionMetrics().keySet().stream().map(r -> Bytes.toString(r)).
+ collect(Collectors.toList()));
+ LOG.debug("serverName=" + serverName + ", regionLoads=" +
+ regionMetrics.stream().map(r -> Bytes.toString(r.getRegionName())).
+ collect(Collectors.toList()));
assertEquals(serverMetrics.getRegionMetrics().size(), regionMetrics.size());
}
}
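The sleep works because RegionServers push their region reports to the HMaster only every hbase.regionserver.msginterval milliseconds, so the master-side ClusterMetrics can lag the RS-side getRegionMetrics by up to one interval. A sketch of a bounded polling alternative to the fixed sleep, assuming the test utility's Waiter support (not the committed approach):

  UTIL.waitFor(10 * MSG_INTERVAL, () -> {
    // Compare the master-side view against each RS-side view; retry until they agree.
    for (Map.Entry<ServerName, ServerMetrics> e : admin
        .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().entrySet()) {
      if (e.getValue().getRegionMetrics().size() != admin.getRegionMetrics(e.getKey()).size()) {
        return false; // the RS report has not yet reached the master
      }
    }
    return true;
  });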
[02/20] hbase git commit: HBASE-20019 Document the ColumnValueFilter
Posted by bu...@apache.org.
HBASE-20019 Document the ColumnValueFilter
Signed-off-by: Chia-Ping Tsai <ch...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a34f129a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a34f129a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a34f129a
Branch: refs/heads/HBASE-15151
Commit: a34f129affff9b5e1098f1c9e86b1b8e7202bb97
Parents: a8471bd
Author: Reid Chan <re...@outlook.com>
Authored: Mon Feb 26 11:31:08 2018 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Mon Feb 26 14:59:42 2018 +0800
----------------------------------------------------------------------
src/main/asciidoc/_chapters/architecture.adoc | 35 ++++++++++++++++++++++
1 file changed, 35 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/a34f129a/src/main/asciidoc/_chapters/architecture.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index 9091d5e..6fb5891 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -321,6 +321,41 @@ SingleColumnValueFilter filter = new SingleColumnValueFilter(
scan.setFilter(filter);
----
+[[client.filter.cv.cvf]]
+==== ColumnValueFilter
+
+Introduced in HBase 2.0.0 as a complement to SingleColumnValueFilter, ColumnValueFilter
+returns only the matched cell, while SingleColumnValueFilter returns the entire row
+(with its other columns and values) to which the matched cell belongs. The constructor
+parameters of ColumnValueFilter are the same as SingleColumnValueFilter's.
+[source,java]
+----
+ColumnValueFilter filter = new ColumnValueFilter(
+ cf,
+ column,
+ CompareOperator.EQUAL,
+ Bytes.toBytes("my value")
+ );
+scan.setFilter(filter);
+----
+
+Note: for a simple query such as "equals to a family:qualifier:value", we highly recommend
+using the following approach instead of SingleColumnValueFilter or ColumnValueFilter:
+[source,java]
+----
+Scan scan = new Scan();
+scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qualifier"));
+ValueFilter vf = new ValueFilter(CompareOperator.EQUAL,
+ new BinaryComparator(Bytes.toBytes("value")));
+scan.setFilter(vf);
+...
+----
+This scan restricts itself to the specified column 'family:qualifier', avoiding scans of
+unrelated families and columns, which gives better performance; the `ValueFilter` is the
+condition used to do the value filtering.
+
+But if the query is more complicated than these examples, then please make a good choice
+case by case.
+
[[client.filter.cvp]]
=== Column Value Comparators
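To make the difference concrete, a sketch of ColumnValueFilter wired into a full scan (table setup omitted; family, qualifier, and value are placeholders):

  Scan scan = new Scan().setFilter(new ColumnValueFilter(
      Bytes.toBytes("family"), Bytes.toBytes("qualifier"),
      CompareOperator.EQUAL, Bytes.toBytes("my value")));
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result result : scanner) {
      // Each Result carries only the matched family:qualifier cell,
      // unlike SingleColumnValueFilter, which would return the whole row.
      for (Cell cell : result.rawCells()) {
        System.out.println(Bytes.toString(CellUtil.cloneRow(cell)));
      }
    }
  }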
[20/20] hbase git commit: HBASE-18467 rely on parallel pipeline.
Posted by bu...@apache.org.
HBASE-18467 rely on parallel pipeline.
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2909bf1e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2909bf1e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2909bf1e
Branch: refs/heads/HBASE-15151
Commit: 2909bf1e5f2af54dd57eef3ea0c1eb9407d0c764
Parents: f3bedc7
Author: Sean Busbey <bu...@apache.org>
Authored: Tue Feb 27 11:43:50 2018 -0600
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Feb 27 11:43:50 2018 -0600
----------------------------------------------------------------------
dev-support/Jenkinsfile | 554 +++++++++++++++++++++----------------------
1 file changed, 266 insertions(+), 288 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/2909bf1e/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 6e37c70..c22b78d 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -117,309 +117,287 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
stash name: 'yetus', includes: "yetus-*/*,yetus-*/**/*,tools/personality.sh"
}
}
- stage ('yetus general check') {
- environment {
- // TODO does hadoopcheck need to be jdk specific?
- // Should be things that work with multijdk
- TESTS = 'all,-unit,-findbugs'
- // on branches that don't support jdk7, this will already be JAVA_HOME, so we'll end up not
- // doing multijdk there.
- MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
- OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}"
- OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}"
- }
- steps {
- unstash 'yetus'
- sh '''#!/usr/bin/env bash
- rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
- rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
- "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
+ stage ('health checks') {
+ parallel {
+ stage ('yetus general check') {
+ environment {
+ // TODO does hadoopcheck need to be jdk specific?
+ // Should be things that work with multijdk
+ TESTS = 'all,-unit,-findbugs'
+ // on branches that don't support jdk7, this will already be JAVA_HOME, so we'll end up not
+ // doing multijdk there.
+ MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
+ OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+ OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+ }
+ steps {
+ unstash 'yetus'
+ sh '''#!/usr/bin/env bash
+ rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+ rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+ "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
'''
- // TODO should this be a download from master, similar to how the personality is?
- sh '''#!/usr/bin/env bash
- declare commentfile
- rm -rf "${OUTPUT_DIR}/success}" "${OUTPUT_DIR}/failure"
- if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
- commentfile="${OUTPUT_DIR}/success"
- echo '(/) {color:green}+1 general checks{color}' >> "${commentfile}"
- else
- commentfile="${OUTPUT_DIR}/failure"
- echo '(x) {color:red}-1 general checks{color}' >> "${commentfile}"
- fi
- echo "-- For more information [see general report|${BUILD_URL}/General_Nightly_Build_Report/]" >> "${commentfile}"
- '''
- }
- post {
- always {
- // Has to be relative to WORKSPACE.
- archive "${env.OUTPUT_DIR_RELATIVE}/*"
- archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
- publishHTML target: [
- allowMissing: true,
- keepAll: true,
- alwaysLinkToLastBuild: true,
- // Has to be relative to WORKSPACE
- reportDir: "${env.OUTPUT_DIR_RELATIVE}",
- reportFiles: 'console-report.html',
- reportName: 'General Nightly Build Report'
- ]
+ // TODO roll this into the hbase_nightly_yetus script
+ sh '''#!/usr/bin/env bash
+ if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+ echo '(/) {color:green}+1 general checks{color}' >> "${OUTPUT_DIR}/commentfile"
+ else
+ echo '(x) {color:red}-1 general checks{color}' >> "${OUTPUT_DIR}/commentfile"
+ fi
+ echo "-- For more information [see general report|${BUILD_URL}/General_Nightly_Build_Report/]" >> "${OUTPUT_DIR}/commentfile"
+ '''
+ }
+ post {
+ always {
+ // Has to be relative to WORKSPACE.
+ archive "${env.OUTPUT_DIR_RELATIVE}/*"
+ archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
+ publishHTML target: [
+ allowMissing: true,
+ keepAll: true,
+ alwaysLinkToLastBuild: true,
+ // Has to be relative to WORKSPACE
+ reportDir: "${env.OUTPUT_DIR_RELATIVE}",
+ reportFiles: 'console-report.html',
+ reportName: 'General Nightly Build Report'
+ ]
+ }
+ }
}
- }
- }
- stage ('yetus jdk7 checks') {
- when {
- branch 'branch-1*'
- }
- environment {
- TESTS = 'mvninstall,compile,javac,unit,htmlout'
- OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_JDK7}"
- OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_JDK7}"
- // On branches where we do jdk7 checks, jdk7 will be JAVA_HOME already.
- }
- steps {
- unstash 'yetus'
- sh '''#!/usr/bin/env bash
- rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
- rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
- "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
+ stage ('yetus jdk7 checks') {
+ when {
+ branch 'branch-1*'
+ }
+ environment {
+ TESTS = 'mvninstall,compile,javac,unit,htmlout'
+ OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_JDK7}"
+ OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_JDK7}"
+ // On branches where we do jdk7 checks, jdk7 will be JAVA_HOME already.
+ }
+ steps {
+ unstash 'yetus'
+ sh '''#!/usr/bin/env bash
+ rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+ rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+ "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
'''
- sh '''#!/usr/bin/env bash
- # for branch-1.1 we don't do jdk8 findbugs, so do it here
- if [ "${BRANCH_NAME}" == "branch-1.1" ]; then
- TESTS+=",findbugs"
- fi
- declare commentfile
- rm -rf "${OUTPUT_DIR}/success}" "${OUTPUT_DIR}/failure"
- if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
- commentfile="${OUTPUT_DIR}/success"
- echo '(/) {color:green}+1 jdk7 checks{color}' >> "${commentfile}"
- else
- commentfile="${OUTPUT_DIR}/failure"
- echo '(x) {color:red}-1 jdk7 checks{color}' >> "${commentfile}"
- fi
- echo "-- For more information [see jdk7 report|${BUILD_URL}/JDK7_Nightly_Build_Report/]" >> "${commentfile}"
- '''
- }
- post {
- always {
- junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
- // zip surefire reports.
- sh '''#!/bin/bash -e
- if [ -d "${OUTPUT_DIR}/archiver" ]; then
- count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
- if [[ 0 -ne ${count} ]]; then
- echo "zipping ${count} archived files"
- zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+ sh '''#!/usr/bin/env bash
+ # for branch-1.1 we don't do jdk8 findbugs, so do it here
+ if [ "${BRANCH_NAME}" == "branch-1.1" ]; then
+ TESTS+=",findbugs"
+ fi
+ rm -rf "${OUTPUT_DIR}/commentfile}"
+ if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+ echo '(/) {color:green}+1 jdk7 checks{color}' >> "${OUTPUT_DIR}/commentfile"
else
- echo "No archived files, skipping compressing."
+ echo '(x) {color:red}-1 jdk7 checks{color}' >> "${OUTPUT_DIR}/commentfile"
fi
- else
- echo "No archiver directory, skipping compressing."
- fi
+ echo "-- For more information [see jdk7 report|${BUILD_URL}/JDK7_Nightly_Build_Report/]" >> "${OUTPUT_DIR}/commentfile"
+ '''
+ }
+ post {
+ always {
+ junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
+ // zip surefire reports.
+ sh '''#!/bin/bash -e
+ if [ -d "${OUTPUT_DIR}/archiver" ]; then
+ count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
+ if [[ 0 -ne ${count} ]]; then
+ echo "zipping ${count} archived files"
+ zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+ else
+ echo "No archived files, skipping compressing."
+ fi
+ else
+ echo "No archiver directory, skipping compressing."
+ fi
'''
- // Has to be relative to WORKSPACE.
- archive "${env.OUTPUT_DIR_RELATIVE}/*"
- archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
- publishHTML target: [
- allowMissing : true,
- keepAll : true,
- alwaysLinkToLastBuild: true,
- // Has to be relative to WORKSPACE.
- reportDir : "${env.OUTPUT_DIR_RELATIVE}",
- reportFiles : 'console-report.html',
- reportName : 'JDK7 Nightly Build Report'
- ]
- }
- }
- }
- stage ('yetus jdk8 hadoop2 checks') {
- when {
- not {
- branch 'branch-1.1*'
+ // Has to be relative to WORKSPACE.
+ archive "${env.OUTPUT_DIR_RELATIVE}/*"
+ archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
+ publishHTML target: [
+ allowMissing : true,
+ keepAll : true,
+ alwaysLinkToLastBuild: true,
+ // Has to be relative to WORKSPACE.
+ reportDir : "${env.OUTPUT_DIR_RELATIVE}",
+ reportFiles : 'console-report.html',
+ reportName : 'JDK7 Nightly Build Report'
+ ]
+ }
+ }
}
- }
- environment {
- TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
- OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
- OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
- // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
- // and is needed on branches that do both jdk7 and jdk8
- SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
- }
- steps {
- unstash 'yetus'
- sh '''#!/usr/bin/env bash
- rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
- rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
- "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
+ stage ('yetus jdk8 hadoop2 checks') {
+ when {
+ not {
+ branch 'branch-1.1*'
+ }
+ }
+ environment {
+ TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
+ OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
+ OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
+ // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
+ // and is needed on branches that do both jdk7 and jdk8
+ SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
+ }
+ steps {
+ unstash 'yetus'
+ sh '''#!/usr/bin/env bash
+ rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+ rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+ "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
'''
- sh '''#!/usr/bin/env bash
- declare commentfile
- rm -rf "${OUTPUT_DIR}/success}" "${OUTPUT_DIR}/failure"
- if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
- commentfile="${OUTPUT_DIR}/success"
- echo '(/) {color:green}+1 jdk8 hadoop2 checks{color}' >> "${commentfile}"
- else
- commentfile="${OUTPUT_DIR}/failure"
- echo '(x) {color:red}-1 jdk8 hadoop2 checks{color}' >> "${commentfile}"
- fi
- echo "-- For more information [see jdk8 (hadoop2) report|${BUILD_URL}/JDK8_Nightly_Build_Report_(Hadoop2)/]" >> "${commentfile}"
- '''
- }
- post {
- always {
- junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
- // zip surefire reports.
- sh '''#!/bin/bash -e
- if [ -d "${OUTPUT_DIR}/archiver" ]; then
- count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
- if [[ 0 -ne ${count} ]]; then
- echo "zipping ${count} archived files"
- zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+ sh '''#!/usr/bin/env bash
+ rm -rf "${OUTPUT_DIR}/commentfile}"
+ if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+ echo '(/) {color:green}+1 jdk8 hadoop2 checks{color}' >> "${OUTPUT_DIR}/commentfile"
else
- echo "No archived files, skipping compressing."
+ commentfile="${OUTPUT_DIR}/failure"
+ echo '(x) {color:red}-1 jdk8 hadoop2 checks{color}' >> "${OUTPUT_DIR}/commentfile"
fi
- else
- echo "No archiver directory, skipping compressing."
- fi
+ echo "-- For more information [see jdk8 (hadoop2) report|${BUILD_URL}/JDK8_Nightly_Build_Report_(Hadoop2)/]" >> "${OUTPUT_DIR}/commentfile"
+ '''
+ }
+ post {
+ always {
+ junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
+ // zip surefire reports.
+ sh '''#!/bin/bash -e
+ if [ -d "${OUTPUT_DIR}/archiver" ]; then
+ count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
+ if [[ 0 -ne ${count} ]]; then
+ echo "zipping ${count} archived files"
+ zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+ else
+ echo "No archived files, skipping compressing."
+ fi
+ else
+ echo "No archiver directory, skipping compressing."
+ fi
'''
- // Has to be relative to WORKSPACE.
- archive "${env.OUTPUT_DIR_RELATIVE}/*"
- archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
- publishHTML target: [
- allowMissing : true,
- keepAll : true,
- alwaysLinkToLastBuild: true,
- // Has to be relative to WORKSPACE.
- reportDir : "${env.OUTPUT_DIR_RELATIVE}",
- reportFiles : 'console-report.html',
- reportName : 'JDK8 Nightly Build Report (Hadoop2)'
- ]
- }
- }
- }
- stage ('yetus jdk8 hadoop3 checks') {
- when {
- not {
- branch 'branch-1*'
+ // Has to be relative to WORKSPACE.
+ archive "${env.OUTPUT_DIR_RELATIVE}/*"
+ archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
+ publishHTML target: [
+ allowMissing : true,
+ keepAll : true,
+ alwaysLinkToLastBuild: true,
+ // Has to be relative to WORKSPACE.
+ reportDir : "${env.OUTPUT_DIR_RELATIVE}",
+ reportFiles : 'console-report.html',
+ reportName : 'JDK8 Nightly Build Report (Hadoop2)'
+ ]
+ }
+ }
}
- }
- environment {
- TESTS = 'mvninstall,compile,javac,unit,htmlout'
- OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
- OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
- // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
- // and is needed on branches that do both jdk7 and jdk8
- SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
- // Activates hadoop 3.0 profile in maven runs.
- HADOOP_PROFILE = '3.0'
- }
- steps {
- unstash 'yetus'
- sh '''#!/usr/bin/env bash
- rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
- rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
- "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
+ stage ('yetus jdk8 hadoop3 checks') {
+ when {
+ not {
+ branch 'branch-1*'
+ }
+ }
+ environment {
+ TESTS = 'mvninstall,compile,javac,unit,htmlout'
+ OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
+ OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
+ // This isn't strictly needed on branches that only support jdk8, but doesn't hurt
+ // and is needed on branches that do both jdk7 and jdk8
+ SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
+ // Activates hadoop 3.0 profile in maven runs.
+ HADOOP_PROFILE = '3.0'
+ }
+ steps {
+ unstash 'yetus'
+ sh '''#!/usr/bin/env bash
+ rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+ rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+ "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
'''
- sh '''#!/usr/bin/env bash
- declare commentfile
- rm -rf "${OUTPUT_DIR}/success}" "${OUTPUT_DIR}/failure"
- if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
- commentfile="${OUTPUT_DIR}/success"
- echo '(/) {color:green}+1 jdk8 hadoop3 checks{color}' >> "${commentfile}"
- else
- commentfile="${OUTPUT_DIR}/failure"
- echo '(x) {color:red}-1 jdk8 hadoop3 checks{color}' >> "${commentfile}"
- fi
- echo "-- For more information [see jdk8 (hadoop3) report|${BUILD_URL}/JDK8_Nightly_Build_Report_(Hadoop3)/]" >> "${commentfile}"
- '''
- }
- post {
- always {
- // Not sure how two junit test reports will work. Disabling this for now.
- // junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
- // zip surefire reports.
- sh '''#!/bin/bash -e
- if [ -d "${OUTPUT_DIR}/archiver" ]; then
- count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
- if [[ 0 -ne ${count} ]]; then
- echo "zipping ${count} archived files"
- zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+ sh '''#!/usr/bin/env bash
+ rm -rf "${OUTPUT_DIR}/commentfile}"
+ if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+ echo '(/) {color:green}+1 jdk8 hadoop3 checks{color}' >> "${OUTPUT_DIR}/commentfile"
else
- echo "No archived files, skipping compressing."
+ echo '(x) {color:red}-1 jdk8 hadoop3 checks{color}' >> "${OUTPUT_DIR}/commentfile"
fi
- else
- echo "No archiver directory, skipping compressing."
- fi
+ echo "-- For more information [see jdk8 (hadoop3) report|${BUILD_URL}/JDK8_Nightly_Build_Report_(Hadoop3)/]" >> "${OUTPUT_DIR}/commentfile"
+ '''
+ }
+ post {
+ always {
+ // Not sure how two junit test reports will work. Disabling this for now.
+ // junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
+ // zip surefire reports.
+ sh '''#!/bin/bash -e
+ if [ -d "${OUTPUT_DIR}/archiver" ]; then
+ count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
+ if [[ 0 -ne ${count} ]]; then
+ echo "zipping ${count} archived files"
+ zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
+ else
+ echo "No archived files, skipping compressing."
+ fi
+ else
+ echo "No archiver directory, skipping compressing."
+ fi
'''
- // Has to be relative to WORKSPACE.
- archive "${env.OUTPUT_DIR_RELATIVE}/*"
- archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
- publishHTML target: [
- allowMissing : true,
- keepAll : true,
- alwaysLinkToLastBuild: true,
- // Has to be relative to WORKSPACE.
- reportDir : "${env.OUTPUT_DIR_RELATIVE}",
- reportFiles : 'console-report.html',
- reportName : 'JDK8 Nightly Build Report (Hadoop3)'
- ]
+ // Has to be relative to WORKSPACE.
+ archive "${env.OUTPUT_DIR_RELATIVE}/*"
+ archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
+ publishHTML target: [
+ allowMissing : true,
+ keepAll : true,
+ alwaysLinkToLastBuild: true,
+ // Has to be relative to WORKSPACE.
+ reportDir : "${env.OUTPUT_DIR_RELATIVE}",
+ reportFiles : 'console-report.html',
+ reportName : 'JDK8 Nightly Build Report (Hadoop3)'
+ ]
+ }
+ }
}
- }
- }
- // This is meant to mimic what a release manager will do to create RCs.
- // See http://hbase.apache.org/book.html#maven.release
- stage ('create source tarball') {
- tools {
- maven 'Maven (latest)'
- // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in.
- jdk "JDK 1.8 (latest)"
- }
- steps {
- sh '''#!/bin/bash -e
- echo "Setting up directories"
- rm -rf "output-srctarball" && mkdir "output-srctarball"
- rm -rf "unpacked_src_tarball" && mkdir "unpacked_src_tarball"
- rm -rf ".m2-for-repo" && mkdir ".m2-for-repo"
- rm -rf ".m2-for-src" && mkdir ".m2-for-src"
- rm -rf "src_tarball_success" "src_tarball_failure"
+ // This is meant to mimic what a release manager will do to create RCs.
+ // See http://hbase.apache.org/book.html#maven.release
+ stage ('create source tarball') {
+ tools {
+ maven 'Maven (latest)'
+ // this needs to be set to the jdk that ought to be used to build releases on the branch the Jenkinsfile is stored in.
+ jdk "JDK 1.8 (latest)"
+ }
+ steps {
+ sh '''#!/bin/bash -e
+ echo "Setting up directories"
+ rm -rf "output-srctarball" && mkdir "output-srctarball"
+ rm -rf "unpacked_src_tarball" && mkdir "unpacked_src_tarball"
+ rm -rf ".m2-for-repo" && mkdir ".m2-for-repo"
+ rm -rf ".m2-for-src" && mkdir ".m2-for-src"
+ rm -rf "src_tarball_commentfile"
'''
- sh '''#!/usr/bin/env bash
- rm -rf "output-srctarball/machine" && mkdir "output-srctarball/machine"
- "${BASEDIR}/dev-support/gather_machine_environment.sh" "output-srctarball/machine"
+ sh '''#!/usr/bin/env bash
+ rm -rf "output-srctarball/machine" && mkdir "output-srctarball/machine"
+ "${BASEDIR}/dev-support/gather_machine_environment.sh" "output-srctarball/machine"
'''
- sh """#!/bin/bash -e
- ${env.BASEDIR}/dev-support/hbase_nightly_source-artifact.sh \
- --intermediate-file-dir output-srctarball \
- --unpack-temp-dir unpacked_src_tarball \
- --maven-m2-initial .m2-for-repo \
- --maven-m2-src-build .m2-for-src \
- --clean-source-checkout \
- ${env.BASEDIR}
+ sh """#!/bin/bash -e
+ ${env.BASEDIR}/dev-support/hbase_nightly_source-artifact.sh \
+ --intermediate-file-dir output-srctarball \
+ --unpack-temp-dir unpacked_src_tarball \
+ --maven-m2-initial .m2-for-repo \
+ --maven-m2-src-build .m2-for-src \
+ --clean-source-checkout \
+ ${env.BASEDIR}
"""
- }
- post {
- always {
- archive 'output-srctarball/*'
- }
- // This approach only works because the source release artifact is the last stage that does work.
- success {
- writeFile file: "${env.WORKSPACE}/src_tarball_success", text: '(/) {color:green}+1 source release artifact{color}\n-- See build output for details.'
- }
- failure {
- writeFile file: "${env.WORKSPACE}/src_tarball_failure", text: '(x) {color:red}-1 source release artifact{color}\n-- See build output for details.'
- }
- }
- }
- stage ('Fail if previous stages failed') {
- steps {
- script {
- def failures = ['src_tarball_failure', "${env.OUTPUT_DIR_RELATIVE_GENERAL}/failure",
- "${env.OUTPUT_DIR_RELATIVE_JDK7}/failure", "${OUTPUT_DIR_RELATIVE_HADOOP2}/failure",
- "${env.OUTPUT_DIR_RELATIVE_HADOOP3}/failure"]
- for ( failure_file in failures ) {
- if (fileExists(file: failure_file)) {
- error 'Failing job due to failure(s) in prior steps.'
+ }
+ post {
+ always {
+ archive 'output-srctarball/*'
+ }
+ // This approach only works because the source release artifact is the last stage that does work.
+ success {
+ writeFile file: "${env.WORKSPACE}/src_tarball_commentfile", text: '(/) {color:green}+1 source release artifact{color}\n-- See build output for details.'
+ }
+ failure {
+ writeFile file: "${env.WORKSPACE}/src_tarball_commentfile", text: '(x) {color:red}-1 source release artifact{color}\n-- See build output for details.'
}
}
}
@@ -431,11 +409,11 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
script {
try {
sh "printenv"
- def results = ["${env.OUTPUT_DIR_RELATIVE_GENERAL}/failure", "${env.OUTPUT_DIR_RELATIVE_GENERAL}/success",
- "${env.OUTPUT_DIR_RELATIVE_JDK7}/failure", "${env.OUTPUT_DIR_RELATIVE_JDK7}/success",
- "${env.OUTPUT_DIR_RELATIVE_HADOOP2}/failure", "${env.OUTPUT_DIR_RELATIVE_HADOOP2}/success",
- "${env.OUTPUT_DIR_RELATIVE_HADOOP3}/failure", "${env.OUTPUT_DIR_RELATIVE_HADOOP3}/success",
- 'src_tarball_failure', 'src_tarball_success']
+ def results = ["${env.OUTPUT_DIR_RELATIVE_GENERAL}/commentfile",
+ "${env.OUTPUT_DIR_RELATIVE_JDK7}/commentfile",
+ "${env.OUTPUT_DIR_RELATIVE_HADOOP2}/commentfile",
+ "${env.OUTPUT_DIR_RELATIVE_HADOOP3}/commentfile",
+ 'src_tarball_commentfile']
echo env.BRANCH_NAME
echo env.BUILD_URL
echo currentBuild.result
[06/20] hbase git commit: HBASE-20036 TestAvoidCellReferencesIntoShippedBlocks timed out (Ram)
Posted by bu...@apache.org.
HBASE-20036 TestAvoidCellReferencesIntoShippedBlocks timed out (Ram)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7cfb4643
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7cfb4643
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7cfb4643
Branch: refs/heads/HBASE-15151
Commit: 7cfb46432fbdf9b53592be11efc8a7d79d1a9455
Parents: a29b3ca
Author: Vasudevan <ra...@intel.com>
Authored: Mon Feb 26 22:07:42 2018 +0530
Committer: Vasudevan <ra...@intel.com>
Committed: Mon Feb 26 22:07:42 2018 +0530
----------------------------------------------------------------------
...estAvoidCellReferencesIntoShippedBlocks.java | 30 +++++++++++---------
1 file changed, 17 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/7cfb4643/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
index 0e12ad6..d22772a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
@@ -400,23 +400,27 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
scanner = table.getScanner(s1);
int count = Iterables.size(scanner);
assertEquals("Count the rows", 2, count);
- iterator = cache.iterator();
- List<BlockCacheKey> newCacheList = new ArrayList<>();
- while (iterator.hasNext()) {
- CachedBlock next = iterator.next();
- BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
- newCacheList.add(cacheKey);
- }
int newBlockRefCount = 0;
- for (BlockCacheKey key : cacheList) {
- if (newCacheList.contains(key)) {
- newBlockRefCount++;
+ List<BlockCacheKey> newCacheList = new ArrayList<>();
+ while (true) {
+ newBlockRefCount = 0;
+ newCacheList.clear();
+ iterator = cache.iterator();
+ while (iterator.hasNext()) {
+ CachedBlock next = iterator.next();
+ BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+ newCacheList.add(cacheKey);
+ }
+ for (BlockCacheKey key : cacheList) {
+ if (newCacheList.contains(key)) {
+ newBlockRefCount++;
+ }
+ }
+ if (newBlockRefCount == 6) {
+ break;
}
}
-
- assertEquals("old blocks should still be found ", 6, newBlockRefCount);
latch.countDown();
-
} catch (IOException e) {
}
}
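The commit replaces a one-shot assertion with a retry loop because the block-cache contents settle asynchronously after the scan completes. A generic sketch of the pattern, with a deadline added here for illustration (the committed loop relies on the test's own timeout; countOldBlocksStillCached is a hypothetical helper):

  long deadline = System.currentTimeMillis() + 30_000;
  int stillCached;
  do {
    stillCached = countOldBlocksStillCached(); // hypothetical: re-walk the cache iterator
    if (stillCached == 6) break;               // all six old blocks found again
    Thread.sleep(100);
  } while (System.currentTimeMillis() < deadline);
  assertEquals("old blocks should still be found", 6, stillCached);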
[09/20] hbase git commit: HBASE-20086 PE randomSeekScan fails with ClassNotFoundException
Posted by bu...@apache.org.
HBASE-20086 PE randomSeekScan fails with ClassNotFoundException
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d3aefe78
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d3aefe78
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d3aefe78
Branch: refs/heads/HBASE-15151
Commit: d3aefe783476e860e7b1c474b50cf18a7ae0be00
Parents: b11e506
Author: tedyu <yu...@gmail.com>
Authored: Mon Feb 26 18:29:35 2018 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Mon Feb 26 18:29:35 2018 -0800
----------------------------------------------------------------------
.../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d3aefe78/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index a5fa2c7..5a63ef4 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -527,7 +527,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
TableMapReduceUtil.addDependencyJars(job);
TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
Histogram.class, // yammer metrics
- ObjectMapper.class); // jackson-mapper-asl
+ ObjectMapper.class, // jackson-mapper-asl
+ FilterAllFilter.class // hbase-server tests jar
+ );
TableMapReduceUtil.initCredentials(job);
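The rule behind the fix: any class a map task instantiates must have its containing jar shipped with the job, and FilterAllFilter lives in the hbase-server tests jar, which addDependencyJars(job) alone does not ship. A sketch of the same setup for an arbitrary job (MyCustomFilter is a placeholder class):

  Job job = Job.getInstance(conf, "example");
  TableMapReduceUtil.addDependencyJars(job); // HBase core and transitive basics
  TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
      MyCustomFilter.class); // ships the jar containing this class to the cluster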
[04/20] hbase git commit: HBASE-20083 Fix findbugs error for ReplicationSyncUp
Posted by bu...@apache.org.
HBASE-20083 Fix findbugs error for ReplicationSyncUp
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2beda62a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2beda62a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2beda62a
Branch: refs/heads/HBASE-15151
Commit: 2beda62a10f0828eb10cec28b0ba53246cd0b671
Parents: 8c74d17
Author: zhangduo <zh...@apache.org>
Authored: Mon Feb 26 16:37:58 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Mon Feb 26 22:13:13 2018 +0800
----------------------------------------------------------------------
.../regionserver/ReplicationSyncUp.java | 59 ++++++--------------
.../replication/TestReplicationSyncUpTool.java | 6 +-
2 files changed, 19 insertions(+), 46 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/2beda62a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index 283eb96..c2862de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -49,35 +49,18 @@ import org.apache.hadoop.util.ToolRunner;
*/
public class ReplicationSyncUp extends Configured implements Tool {
- private static Configuration conf;
-
private static final long SLEEP_TIME = 10000;
- // although the tool is designed to be run on command line
- // this api is provided for executing the tool through another app
- public static void setConfigure(Configuration config) {
- conf = config;
- }
-
/**
* Main program
- * @param args
- * @throws Exception
*/
public static void main(String[] args) throws Exception {
- if (conf == null) conf = HBaseConfiguration.create();
- int ret = ToolRunner.run(conf, new ReplicationSyncUp(), args);
+ int ret = ToolRunner.run(HBaseConfiguration.create(), new ReplicationSyncUp(), args);
System.exit(ret);
}
@Override
public int run(String[] args) throws Exception {
- Replication replication;
- ReplicationSourceManager manager;
- FileSystem fs;
- Path oldLogDir, logDir, walRootDir;
- ZKWatcher zkw;
-
Abortable abortable = new Abortable() {
@Override
public void abort(String why, Throwable e) {
@@ -88,23 +71,19 @@ public class ReplicationSyncUp extends Configured implements Tool {
return false;
}
};
-
- zkw =
- new ZKWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable,
- true);
-
- walRootDir = FSUtils.getWALRootDir(conf);
- fs = FSUtils.getWALFileSystem(conf);
- oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
- logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
-
- System.out.println("Start Replication Server start");
- replication = new Replication();
- replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, null);
- manager = replication.getReplicationManager();
- manager.init().get();
-
- try {
+ Configuration conf = getConf();
+ try (ZKWatcher zkw =
+ new ZKWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable, true)) {
+ Path walRootDir = FSUtils.getWALRootDir(conf);
+ FileSystem fs = FSUtils.getWALFileSystem(conf);
+ Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+ Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
+
+ System.out.println("Start Replication Server start");
+ Replication replication = new Replication();
+ replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, null);
+ ReplicationSourceManager manager = replication.getReplicationManager();
+ manager.init().get();
while (manager.activeFailoverTaskCount() > 0) {
Thread.sleep(SLEEP_TIME);
}
@@ -114,15 +93,12 @@ public class ReplicationSyncUp extends Configured implements Tool {
manager.join();
} catch (InterruptedException e) {
System.err.println("didn't wait long enough:" + e);
- return (-1);
- } finally {
- zkw.close();
+ return -1;
}
-
return 0;
}
- static class DummyServer implements Server {
+ class DummyServer implements Server {
String hostname;
ZKWatcher zkw;
@@ -138,7 +114,7 @@ public class ReplicationSyncUp extends Configured implements Tool {
@Override
public Configuration getConfiguration() {
- return conf;
+ return getConf();
}
@Override
@@ -191,7 +167,6 @@ public class ReplicationSyncUp extends Configured implements Tool {
@Override
public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
return null;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/2beda62a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
index 19aeac1..6c487ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.ToolRunner;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
@@ -420,9 +421,6 @@ public class TestReplicationSyncUpTool extends TestReplicationBase {
}
protected void syncUp(HBaseTestingUtility ut) throws Exception {
- ReplicationSyncUp.setConfigure(ut.getConfiguration());
- String[] arguments = new String[] { null };
- new ReplicationSyncUp().run(arguments);
+ ToolRunner.run(ut.getConfiguration(), new ReplicationSyncUp(), new String[0]);
}
-
}
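The fix retires the mutable static Configuration in favor of the standard Hadoop Tool contract: ToolRunner.run installs the Configuration on the Configured base class, and run() reads it back via getConf(). A minimal sketch of that contract (class name illustrative):

  public class MyTool extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
      Configuration conf = getConf(); // injected by ToolRunner.run, no static state
      return 0;
    }

    public static void main(String[] args) throws Exception {
      System.exit(ToolRunner.run(HBaseConfiguration.create(), new MyTool(), args));
    }
  }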
[14/20] hbase git commit: HBASE-20088 Update NOTICE.txt year
Posted by bu...@apache.org.
HBASE-20088 Update NOTICE.txt year
Signed-off-by: Andrew Purtell <ap...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3443aa96
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3443aa96
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3443aa96
Branch: refs/heads/HBASE-15151
Commit: 3443aa96b4357d06181fafd9ad03d6a846df9d88
Parents: e47d1e4
Author: Josh Elser <el...@apache.org>
Authored: Mon Feb 26 17:00:50 2018 -0500
Committer: Josh Elser <el...@apache.org>
Committed: Tue Feb 27 09:52:30 2018 -0500
----------------------------------------------------------------------
NOTICE.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/3443aa96/NOTICE.txt
----------------------------------------------------------------------
diff --git a/NOTICE.txt b/NOTICE.txt
index 9c238be..ba4b609 100755
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -1,5 +1,5 @@
Apache HBase
-Copyright 2007-2017 The Apache Software Foundation
+Copyright 2007-2018 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
[10/20] hbase git commit: HBASE-20069 fix existing findbugs errors in hbase-server; ADDENDUM Address review
Posted by bu...@apache.org.
HBASE-20069 fix existing findbugs errors in hbase-server; ADDENDUM Address review
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d272ac90
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d272ac90
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d272ac90
Branch: refs/heads/HBASE-15151
Commit: d272ac908ceb4696e05431066ae02d953fa6fc9d
Parents: d3aefe7
Author: Michael Stack <st...@apache.org>
Authored: Mon Feb 26 16:26:36 2018 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Mon Feb 26 23:16:21 2018 -0800
----------------------------------------------------------------------
.../hadoop/hbase/master/ClusterStatusPublisher.java | 6 +++++-
.../hadoop/hbase/regionserver/MemStoreFlusher.java | 14 +++-----------
.../hbase/regionserver/RegionCoprocessorHost.java | 4 +++-
3 files changed, 11 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d272ac90/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index 5e97204..21fa263 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -134,7 +134,7 @@ public class ClusterStatusPublisher extends ScheduledChore {
@Override
protected void chore() {
- if (!connected) {
+ if (!isConnected()) {
return;
}
@@ -170,6 +170,10 @@ public class ClusterStatusPublisher extends ScheduledChore {
publisher.close();
}
+ private synchronized boolean isConnected() {
+ return this.connected;
+ }
+
/**
* Create the dead server to send. A dead server is sent NB_SEND times. We send at max
* MAX_SERVER_PER_MESSAGE at a time. if there are too many dead servers, we send the newly
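The findbugs complaint being addressed is inconsistent synchronization: the connected flag is written while holding the object lock elsewhere, but chore() read it without one. Routing every access through synchronized accessors makes the locking uniform; a sketch of the shape (names illustrative):

  private boolean connected;

  private synchronized void setConnected(boolean connected) {
    this.connected = connected;
  }

  private synchronized boolean isConnected() {
    return this.connected; // readers now see writes made under the same lock
  }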
http://git-wip-us.apache.org/repos/asf/hbase/blob/d272ac90/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index a0e65ec..23321e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -90,17 +90,9 @@ class MemStoreFlusher implements FlushRequester {
private FlushType flushType;
/**
- * Singleton instance of this class inserted into flush queue.
+ * Singleton instance inserted into flush queue used for signaling.
*/
- private static final WakeupFlushThread WAKEUPFLUSH_INSTANCE = new WakeupFlushThread();
-
- /**
- * Marker class used as a token inserted into flush queue that ensures the flusher does not sleep.
- * Create a single instance only.
- */
- private static final class WakeupFlushThread implements FlushQueueEntry {
- private WakeupFlushThread() {}
-
+ private static final FlushQueueEntry WAKEUPFLUSH_INSTANCE = new FlushQueueEntry() {
@Override
public long getDelay(TimeUnit unit) {
return 0;
@@ -120,7 +112,7 @@ class MemStoreFlusher implements FlushRequester {
public int hashCode() {
return 42;
}
- }
+ };
/**
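Collapsing the one-off marker class into an anonymous constant keeps the useful property: exactly one sentinel instance, so consumers can use an identity check. A self-contained sketch of the wakeup-sentinel pattern with a plain BlockingQueue (names illustrative):

  import java.util.concurrent.BlockingQueue;
  import java.util.concurrent.LinkedBlockingQueue;

  class SentinelSketch {
    interface Entry {}
    static final Entry WAKEUP = new Entry() {}; // the single shared instance

    public static void main(String[] args) throws InterruptedException {
      BlockingQueue<Entry> queue = new LinkedBlockingQueue<>();
      queue.add(WAKEUP); // producer pokes the consumer without handing it work
      Entry e = queue.take();
      if (e == WAKEUP) { // identity comparison suffices: only one instance exists
        System.out.println("woke up; re-check state, nothing to flush");
      }
    }
  }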
http://git-wip-us.apache.org/repos/asf/hbase/blob/d272ac90/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index f3c93dc..47b389a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -1119,6 +1119,8 @@ public class RegionCoprocessorHost
* @return true or false to return to client if default processing should be bypassed, or null
* otherwise
*/
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_BOOLEAN_RETURN_NULL",
+ justification="Null is legit")
public Boolean preCheckAndPutAfterRowLock(
final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op,
final ByteArrayComparable comparator, final Put put) throws IOException {
@@ -1207,7 +1209,7 @@ public class RegionCoprocessorHost
* or null otherwise
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_BOOLEAN_RETURN_NULL",
- justification="TODO: Fix")
+ justification="Null is legit")
public Boolean preCheckAndDeleteAfterRowLock(final byte[] row, final byte[] family,
final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator,
final Delete delete) throws IOException {
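NP_BOOLEAN_RETURN_NULL flags methods that return a boxed Boolean null, which would NPE on unboxing. Here null is a deliberate third state, so the suppression documents intent instead of the old "TODO: Fix". A sketch of the three-state idiom (method name illustrative):

  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_BOOLEAN_RETURN_NULL",
      justification = "Null means no coprocessor made a decision")
  Boolean preCheckSketch() {
    // TRUE/FALSE: bypass default processing and return that answer to the client;
    // null: no decision, fall through to default processing.
    return null;
  }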
[08/20] hbase git commit: HBASE-20069 fix existing findbugs errors in hbase-server
Posted by bu...@apache.org.
HBASE-20069 fix existing findbugs errors in hbase-server
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b11e5066
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b11e5066
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b11e5066
Branch: refs/heads/HBASE-15151
Commit: b11e506664614c243c08949c256430d4dd13ba6c
Parents: 73028d5
Author: Michael Stack <st...@apache.org>
Authored: Sat Feb 24 13:01:02 2018 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Mon Feb 26 16:01:31 2018 -0800
----------------------------------------------------------------------
.../hbase/io/encoding/EncodedDataBlock.java | 19 +--
.../apache/hadoop/hbase/nio/MultiByteBuff.java | 4 +-
.../hadoop/hbase/nio/TestMultiByteBuff.java | 19 +++
.../hbase/procedure2/ProcedureExecutor.java | 1 -
.../hbase/procedure2/StateMachineProcedure.java | 1 -
.../org/apache/hadoop/hbase/ipc/RpcServer.java | 3 +
.../org/apache/hadoop/hbase/master/HMaster.java | 1 -
.../master/assignment/AssignmentManager.java | 1 -
.../assignment/SplitTableRegionProcedure.java | 7 +-
.../hbase/master/cleaner/CleanerChore.java | 39 ++++---
.../hadoop/hbase/regionserver/HRegion.java | 3 +-
.../hbase/regionserver/MemStoreFlusher.java | 115 +++++++++++++------
.../hbase/regionserver/RSRpcServices.java | 1 -
.../regionserver/RegionCoprocessorHost.java | 2 +
.../hbase/regionserver/wal/AsyncFSWAL.java | 4 +-
.../hbase/util/compaction/MajorCompactor.java | 9 +-
16 files changed, 147 insertions(+), 82 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index a791c09..af68656 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -228,6 +228,7 @@ public class EncodedDataBlock {
*/
public byte[] encodeData() {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ byte [] baosBytes = null;
try {
baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
DataOutputStream out = new DataOutputStream(baos);
@@ -255,25 +256,17 @@ public class EncodedDataBlock {
kv.setSequenceId(memstoreTS);
this.dataBlockEncoder.encode(kv, encodingCtx, out);
}
- BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
- baos.writeTo(stream);
- this.dataBlockEncoder.endBlockEncoding(encodingCtx, out, stream.ourBytes);
+ // Below depends on BAOS internal behavior. toByteArray makes a copy of bytes so far.
+ baos.flush();
+ baosBytes = baos.toByteArray();
+ this.dataBlockEncoder.endBlockEncoding(encodingCtx, out, baosBytes);
} catch (IOException e) {
throw new RuntimeException(String.format(
"Bug in encoding part of algorithm %s. " +
"Probably it requested more bytes than are available.",
toString()), e);
}
- return baos.toByteArray();
- }
-
- private static class BufferGrabbingByteArrayOutputStream extends ByteArrayOutputStream {
- private byte[] ourBytes;
-
- @Override
- public synchronized void write(byte[] b, int off, int len) {
- this.ourBytes = b;
- }
+ return baosBytes;
}
@Override
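Context for the EncodedDataBlock change: the deleted BufferGrabbingByteArrayOutputStream captured the stream's raw backing array via writeTo(), and that array is capacity-sized, not trimmed to the byte count, so downstream consumers could read trailing garbage. toByteArray() hands back a correctly sized copy. A standalone sketch, illustrative only and not part of the patch:

    import java.io.ByteArrayOutputStream;
    import java.io.OutputStream;

    public class BufferGrabDemo {
      public static void main(String[] args) throws Exception {
        ByteArrayOutputStream baos = new ByteArrayOutputStream(64); // capacity 64
        baos.write(new byte[] { 1, 2, 3 });                         // count is 3
        // toByteArray() copies exactly 'count' bytes, so this is always safe.
        System.out.println(baos.toByteArray().length);              // prints 3
        // Grabbing the buffer the way the deleted subclass did exposes the
        // whole backing array, trailing garbage included.
        final byte[][] grabbed = new byte[1][];
        baos.writeTo(new OutputStream() {
          @Override public void write(int b) {}
          @Override public void write(byte[] b, int off, int len) { grabbed[0] = b; }
        });
        System.out.println(grabbed[0].length);                      // prints 64, not 3
      }
    }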
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
index fecf012..847e2eb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
@@ -282,7 +282,7 @@ public class MultiByteBuff extends ByteBuff {
return ByteBufferUtils.toShort(item, offsetInItem);
}
if (items.length - 1 == itemIndex) {
- // means cur item is the last one and we wont be able to read a int. Throw exception
+ // means cur item is the last one and we wont be able to read a short. Throw exception
throw new BufferUnderflowException();
}
ByteBuffer nextItem = items[itemIndex + 1];
@@ -294,7 +294,7 @@ public class MultiByteBuff extends ByteBuff {
}
for (int i = 0; i < Bytes.SIZEOF_SHORT - remainingLen; i++) {
l = (short) (l << 8);
- l = (short) (l ^ (ByteBufferUtils.toByte(item, i) & 0xFF));
+ l = (short) (l ^ (ByteBufferUtils.toByte(nextItem, i) & 0xFF));
}
return l;
}
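Note the second hunk is a real bug fix, not just the comment correction above it: when a short spanned two sub-buffers, the trailing byte was read from item (the exhausted buffer) instead of nextItem. A minimal sketch, assuming big-endian assembly as in the code above:

    import java.nio.ByteBuffer;

    public class SpanningShortDemo {
      // High byte is the last byte of 'first'; low byte is byte 0 of 'second'.
      static short shortAcross(ByteBuffer first, ByteBuffer second) {
        short l = (short) (first.get(first.limit() - 1) & 0xFF);
        l = (short) (l << 8);
        l = (short) (l ^ (second.get(0) & 0xFF)); // must read the NEXT buffer
        return l;
      }

      public static void main(String[] args) {
        ByteBuffer a = ByteBuffer.wrap(new byte[] { 1 });
        ByteBuffer b = ByteBuffer.wrap(new byte[] { 0 });
        System.out.println(shortAcross(a, b)); // prints 256, i.e. (1 << 8) | 0
      }
    }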
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java
index 16ff404..95c088e 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java
@@ -43,6 +43,25 @@ public class TestMultiByteBuff {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestMultiByteBuff.class);
+ /**
+ * Test right answer though we span many sub-buffers.
+ */
+ @Test
+ public void testGetShort() {
+ ByteBuffer bb1 = ByteBuffer.allocate(1);
+ bb1.put((byte)1);
+ ByteBuffer bb2 = ByteBuffer.allocate(1);
+ bb2.put((byte)0);
+ ByteBuffer bb3 = ByteBuffer.allocate(1);
+ bb3.put((byte)2);
+ ByteBuffer bb4 = ByteBuffer.allocate(1);
+ bb4.put((byte)3);
+ MultiByteBuff mbb = new MultiByteBuff(bb1, bb2, bb3, bb4);
+ assertEquals(256, mbb.getShortAfterPosition(0));
+ assertEquals(2, mbb.getShortAfterPosition(1));
+ assertEquals(515, mbb.getShortAfterPosition(2));
+ }
+
@Test
public void testWritesAndReads() {
// Absolute reads
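For reference, the expected values in testGetShort are plain big-endian assembly across the one-byte buffers: 256 == (1 << 8) | 0, 2 == (0 << 8) | 2, and 515 == (2 << 8) | 3. Before the fix above, the low byte would have been re-read from the exhausted buffer instead of the next one.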
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 665d223..19efdc7 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -206,7 +206,6 @@ public class ProcedureExecutor<TEnvironment> {
final long now = EnvironmentEdgeManager.currentTime();
final Iterator<Map.Entry<Long, CompletedProcedureRetainer>> it = completed.entrySet().iterator();
- final boolean debugEnabled = LOG.isDebugEnabled();
while (it.hasNext() && store.isRunning()) {
final Map.Entry<Long, CompletedProcedureRetainer> entry = it.next();
final CompletedProcedureRetainer retainer = entry.getValue();
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
index c530386..0880238 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
@@ -209,7 +209,6 @@ public abstract class StateMachineProcedure<TEnvironment, TState>
@Override
protected boolean abort(final TEnvironment env) {
- final TState state = getCurrentState();
LOG.debug("Abort requested for {}", this);
if (hasMoreState()) {
aborted.set(true);
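Both the ProcedureExecutor and StateMachineProcedure hunks remove dead local stores (findbugs DLS_DEAD_LOCAL_STORE): values that were computed and never read. With SLF4J's parameterized logging, the isDebugEnabled() guard that debugEnabled once fed is unnecessary anyway, since the {} arguments are only formatted when the level is enabled.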
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index d60612f..686d578 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -114,6 +114,9 @@ public abstract class RpcServer implements RpcServerInterface,
+ Server.class.getName());
protected SecretManager<TokenIdentifier> secretManager;
protected final Map<String, String> saslProps;
+
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+ justification="Start is synchronized so authManager creation is single-threaded")
protected ServiceAuthorizationManager authManager;
/** This is set to Call object before Handler invokes an RPC and ybdie
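The IS2_INCONSISTENT_SYNC warning fires when a field is written under synchronization in some code paths and accessed without it in others. The annotation here documents the reasoning instead of adding locking: per the justification, authManager is only assigned inside the synchronized start(), so its creation is single-threaded.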
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5e0ce84..b0dd0b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1200,7 +1200,6 @@ public class HMaster extends HRegionServer implements MasterServices {
private void startProcedureExecutor() throws IOException {
final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
-
procedureStore = new WALProcedureStore(conf,
new MasterProcedureEnv.WALStoreLeaseRecovery(this));
procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 0f26bfa..a48ed75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1298,7 +1298,6 @@ public class AssignmentManager implements ServerListener {
final Set<ServerName> offlineServersWithOnlineRegions = new HashSet<>();
int size = regionStates.getRegionStateNodes().size();
final List<RegionInfo> offlineRegionsToAssign = new ArrayList<>(size);
- long startTime = System.currentTimeMillis();
// If deadservers then its a failover, else, we are not sure yet.
boolean failover = deadServers;
for (RegionStateNode regionNode: regionStates.getRegionStateNodes()) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 46ec149..cabccbc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -432,6 +432,10 @@ public class SplitTableRegionProcedure
}
RegionInfo parentHRI = node.getRegionInfo();
+ if (parentHRI == null) {
+ LOG.info("Unsplittable; parent region is null; node={}", node);
+ return false;
+ }
// Lookup the parent HRI state from the AM, which has the latest updated info.
// Protect against the case where concurrent SPLIT requests came in and succeeded
// just before us.
@@ -457,8 +461,7 @@ public class SplitTableRegionProcedure
// we are always able to split the region
if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
LOG.warn("pid=" + getProcId() + " split switch is off! skip split of " + parentHRI);
- setFailure(new IOException("Split region " +
- (parentHRI == null? "null": parentHRI.getRegionNameAsString()) +
+ setFailure(new IOException("Split region " + parentHRI.getRegionNameAsString() +
" failed due to split switch off"));
return false;
}
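The early return on a null parentHRI is what allows the later error message to drop its null-guarding ternary: once that path is closed off, parentHRI.getRegionNameAsString() can no longer dereference null, and findbugs stops flagging the redundant check.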
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index 9ef7dce..fdf5141 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -50,6 +50,8 @@ import org.slf4j.LoggerFactory;
* Abstract Cleaner that uses a chain of delegates to clean a directory of files
* @param <T> Cleaner delegate class that is dynamically loaded from configuration
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
+ justification="TODO: Fix. It is wonky have static pool initialized from instance")
public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
implements ConfigurationObserver {
@@ -67,8 +69,8 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
// It may be waste resources for each cleaner chore own its pool,
// so let's make pool for all cleaner chores.
- private static volatile ForkJoinPool chorePool;
- private static volatile int chorePoolSize;
+ private static volatile ForkJoinPool CHOREPOOL;
+ private static volatile int CHOREPOOLSIZE;
protected final FileSystem fs;
private final Path oldFileDir;
@@ -102,15 +104,14 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
this.params = params;
initCleanerChain(confKey);
- if (chorePool == null) {
+ if (CHOREPOOL == null) {
String poolSize = conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE);
- chorePoolSize = calculatePoolSize(poolSize);
+ CHOREPOOLSIZE = calculatePoolSize(poolSize);
// poolSize may be 0 or 0.0 from a careless configuration,
// double check to make sure.
- chorePoolSize = chorePoolSize == 0 ?
- calculatePoolSize(DEFAULT_CHORE_POOL_SIZE) : chorePoolSize;
- this.chorePool = new ForkJoinPool(chorePoolSize);
- LOG.info("Cleaner pool size is {}", chorePoolSize);
+ CHOREPOOLSIZE = CHOREPOOLSIZE == 0? calculatePoolSize(DEFAULT_CHORE_POOL_SIZE): CHOREPOOLSIZE;
+ this.CHOREPOOL = new ForkJoinPool(CHOREPOOLSIZE);
+ LOG.info("Cleaner pool size is {}", CHOREPOOLSIZE);
}
}
@@ -119,11 +120,11 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
* @param poolSize size from configuration
* @return size of pool after calculation
*/
- int calculatePoolSize(String poolSize) {
+ static int calculatePoolSize(String poolSize) {
if (poolSize.matches("[1-9][0-9]*")) {
// If poolSize is an integer, return it directly,
// but upmost to the number of available processors.
- int size = Math.min(Integer.valueOf(poolSize), AVAIL_PROCESSORS);
+ int size = Math.min(Integer.parseInt(poolSize), AVAIL_PROCESSORS);
if (size == AVAIL_PROCESSORS) {
LOG.warn("Use full core processors to scan dir, size={}", size);
}
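Two fixes in this hunk: the method becomes static (it touches only static state), and Integer.parseInt replaces Integer.valueOf, which boxed an Integer only to unbox it (findbugs DM_BOXED_PRIMITIVE_FOR_PARSING). A standalone sketch of the integer branch, with availProcessors standing in for AVAIL_PROCESSORS; the real method also accepts a decimal fraction of the cores, which is why the caller guards against "0 or 0.0 from a careless configuration":

    public class PoolSizeDemo {
      // Illustrative only: the integer branch of the rule shown above.
      static int poolSizeFor(String configured, int availProcessors) {
        if (configured.matches("[1-9][0-9]*")) {
          // Positive integer: use it, capped at the number of cores.
          return Math.min(Integer.parseInt(configured), availProcessors);
        }
        return 1; // stand-in for the fractional branch, omitted here
      }

      public static void main(String[] args) {
        System.out.println(poolSizeFor("4", 8));   // prints 4
        System.out.println(poolSizeFor("100", 8)); // prints 8 (capped)
      }
    }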
@@ -173,12 +174,12 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
@Override
public void onConfigurationChange(Configuration conf) {
int updatedSize = calculatePoolSize(conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE));
- if (updatedSize == chorePoolSize) {
+ if (updatedSize == CHOREPOOLSIZE) {
LOG.trace("Size from configuration is same as previous={}, no need to update.", updatedSize);
return;
}
- chorePoolSize = updatedSize;
- if (chorePool.getPoolSize() == 0) {
+ CHOREPOOLSIZE = updatedSize;
+ if (CHOREPOOL.getPoolSize() == 0) {
// Chore does not work now, update it directly.
updateChorePoolSize(updatedSize);
return;
@@ -188,9 +189,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
}
private void updateChorePoolSize(int updatedSize) {
- chorePool.shutdownNow();
- LOG.info("Update chore's pool size from {} to {}", chorePool.getParallelism(), updatedSize);
- chorePool = new ForkJoinPool(updatedSize);
+ CHOREPOOL.shutdownNow();
+ LOG.info("Update chore's pool size from {} to {}", CHOREPOOL.getParallelism(), updatedSize);
+ CHOREPOOL = new ForkJoinPool(updatedSize);
}
/**
@@ -226,7 +227,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
}
// After each clean chore, checks if receives reconfigure notification while cleaning
if (reconfig.compareAndSet(true, false)) {
- updateChorePoolSize(chorePoolSize);
+ updateChorePoolSize(CHOREPOOLSIZE);
}
} else {
LOG.debug("Cleaner chore disabled! Not cleaning.");
@@ -240,7 +241,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
public Boolean runCleaner() {
preRunCleaner();
CleanerTask task = new CleanerTask(this.oldFileDir, true);
- chorePool.submit(task);
+ CHOREPOOL.submit(task);
return task.join();
}
@@ -372,7 +373,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
@VisibleForTesting
int getChorePoolSize() {
- return chorePoolSize;
+ return CHOREPOOLSIZE;
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 414bc31..a64d6f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -8111,13 +8111,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
@Override
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH",
- justification="Intentional")
public void startRegionOperation(Operation op) throws IOException {
switch (op) {
case GET: // read operations
case SCAN:
checkReadsEnabled();
+ break;
default:
break;
}
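Behavior is unchanged here, since the default arm is empty, but the explicit break turns an intentional fall-through into straight-line code, which is why the SF_SWITCH_FALLTHROUGH suppression can be deleted along with it.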
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 6e4191e..a0e65ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -90,6 +90,40 @@ class MemStoreFlusher implements FlushRequester {
private FlushType flushType;
/**
+ * Singleton instance of this class inserted into flush queue.
+ */
+ private static final WakeupFlushThread WAKEUPFLUSH_INSTANCE = new WakeupFlushThread();
+
+ /**
+ * Marker class used as a token inserted into flush queue that ensures the flusher does not sleep.
+ * Create a single instance only.
+ */
+ private static final class WakeupFlushThread implements FlushQueueEntry {
+ private WakeupFlushThread() {}
+
+ @Override
+ public long getDelay(TimeUnit unit) {
+ return 0;
+ }
+
+ @Override
+ public int compareTo(Delayed o) {
+ return -1;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj == this;
+ }
+
+ @Override
+ public int hashCode() {
+ return 42;
+ }
+ }
+
+
+ /**
* @param conf
* @param server
*/
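Replacing a freshly allocated WakeupFlushThread per wakeup with the shared WAKEUPFLUSH_INSTANCE lets the poll loop below compare by identity (fqe == WAKEUPFLUSH_INSTANCE) rather than instanceof, and the equals/hashCode overrides make that identity semantics explicit. A minimal sketch of the marker-token pattern on a DelayQueue:

    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    public class MarkerTokenDemo {
      static final class Wakeup implements Delayed {
        @Override public long getDelay(TimeUnit unit) { return 0; } // always ripe
        @Override public int compareTo(Delayed o) { return -1; }    // sorts first
      }

      static final Wakeup WAKEUP = new Wakeup(); // single shared token

      public static void main(String[] args) throws InterruptedException {
        DelayQueue<Wakeup> queue = new DelayQueue<>();
        queue.add(WAKEUP);
        // Identity, not equals(), distinguishes the token from real work items.
        System.out.println(queue.poll(10, TimeUnit.MILLISECONDS) == WAKEUP); // true
      }
    }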
@@ -147,17 +181,18 @@ class MemStoreFlusher implements FlushRequester {
boolean flushedOne = false;
while (!flushedOne) {
- // Find the biggest region that doesn't have too many storefiles
- // (might be null!)
- HRegion bestFlushableRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, true);
+ // Find the biggest region that doesn't have too many storefiles (might be null!)
+ HRegion bestFlushableRegion =
+ getBiggestMemStoreRegion(regionsBySize, excludedRegions, true);
// Find the biggest region, total, even if it might have too many flushes.
- HRegion bestAnyRegion = getBiggestMemStoreRegion(
- regionsBySize, excludedRegions, false);
+ HRegion bestAnyRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, false);
// Find the biggest region that is a secondary region
- HRegion bestRegionReplica = getBiggestMemStoreOfRegionReplica(regionsBySize,
- excludedRegions);
-
- if (bestAnyRegion == null && bestRegionReplica == null) {
+ HRegion bestRegionReplica = getBiggestMemStoreOfRegionReplica(regionsBySize, excludedRegions);
+ if (bestAnyRegion == null) {
+ // If bestAnyRegion is null, assign replica. It may be null too. Next step is check for null
+ bestAnyRegion = bestRegionReplica;
+ }
+ if (bestAnyRegion == null) {
LOG.error("Above memory mark but there are no flushable regions!");
return false;
}
@@ -169,19 +204,20 @@ class MemStoreFlusher implements FlushRequester {
case ABOVE_OFFHEAP_HIGHER_MARK:
case ABOVE_OFFHEAP_LOWER_MARK:
bestAnyRegionSize = bestAnyRegion.getMemStoreOffHeapSize();
- bestFlushableRegionSize = bestFlushableRegion.getMemStoreOffHeapSize();
+ bestFlushableRegionSize = getMemStoreOffHeapSize(bestFlushableRegion);
break;
+
case ABOVE_ONHEAP_HIGHER_MARK:
case ABOVE_ONHEAP_LOWER_MARK:
bestAnyRegionSize = bestAnyRegion.getMemStoreHeapSize();
- bestFlushableRegionSize = bestFlushableRegion.getMemStoreHeapSize();
+ bestFlushableRegionSize = getMemStoreHeapSize(bestFlushableRegion);
break;
+
default:
bestAnyRegionSize = bestAnyRegion.getMemStoreDataSize();
- bestFlushableRegionSize = bestFlushableRegion.getMemStoreDataSize();
+ bestFlushableRegionSize = getMemStoreDataSize(bestFlushableRegion);
}
- if (bestFlushableRegion != null &&
- bestAnyRegionSize > 2 * bestFlushableRegionSize) {
+ if (bestAnyRegionSize > 2 * bestFlushableRegionSize) {
// Even if it's not supposed to be flushed, pick a region if it's more than twice
// as big as the best flushable one - otherwise when we're under pressure we make
// lots of little flushes and cause lots of compactions, etc, which just makes
@@ -211,21 +247,22 @@ class MemStoreFlusher implements FlushRequester {
case ABOVE_OFFHEAP_HIGHER_MARK:
case ABOVE_OFFHEAP_LOWER_MARK:
regionToFlushSize = regionToFlush.getMemStoreOffHeapSize();
- bestRegionReplicaSize = bestRegionReplica.getMemStoreOffHeapSize();
+ bestRegionReplicaSize = getMemStoreOffHeapSize(bestRegionReplica);
break;
+
case ABOVE_ONHEAP_HIGHER_MARK:
case ABOVE_ONHEAP_LOWER_MARK:
regionToFlushSize = regionToFlush.getMemStoreHeapSize();
- bestRegionReplicaSize = bestRegionReplica.getMemStoreHeapSize();
+ bestRegionReplicaSize = getMemStoreHeapSize(bestRegionReplica);
break;
+
default:
regionToFlushSize = regionToFlush.getMemStoreDataSize();
- bestRegionReplicaSize = bestRegionReplica.getMemStoreDataSize();
+ bestRegionReplicaSize = getMemStoreDataSize(bestRegionReplica);
}
Preconditions.checkState(
- (regionToFlush != null && regionToFlushSize > 0) ||
- (bestRegionReplica != null && bestRegionReplicaSize > 0));
+ (regionToFlush != null && regionToFlushSize > 0) || bestRegionReplicaSize > 0);
if (regionToFlush == null ||
(bestRegionReplica != null &&
@@ -266,6 +303,27 @@ class MemStoreFlusher implements FlushRequester {
return true;
}
+ /**
+ * @return Return memstore offheap size or 0 if <code>r</code> is null
+ */
+ private static long getMemStoreOffHeapSize(HRegion r) {
+ return r == null? 0: r.getMemStoreOffHeapSize();
+ }
+
+ /**
+ * @return Return memstore heap size or 0 if <code>r</code> is null
+ */
+ private static long getMemStoreHeapSize(HRegion r) {
+ return r == null? 0: r.getMemStoreHeapSize();
+ }
+
+ /**
+ * @return Return memstore data size or 0 if <code>r</code> is null
+ */
+ private static long getMemStoreDataSize(HRegion r) {
+ return r == null? 0: r.getMemStoreDataSize();
+ }
+
private class FlushHandler extends HasThread {
private FlushHandler(String name) {
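The three null-safe helpers above are what make the earlier simplifications legal: every bestFlushableRegion and bestRegionReplica size lookup that findbugs could flag as a possible null dereference now funnels through a method that returns 0 for null, so the Preconditions.checkState expression can shed its redundant null guard as well.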
@@ -279,7 +337,7 @@ class MemStoreFlusher implements FlushRequester {
try {
wakeupPending.set(false); // allow someone to wake us up again
fqe = flushQueue.poll(threadWakeFrequency, TimeUnit.MILLISECONDS);
- if (fqe == null || fqe instanceof WakeupFlushThread) {
+ if (fqe == null || fqe == WAKEUPFLUSH_INSTANCE) {
FlushType type = isAboveLowWaterMark();
if (type != FlushType.NORMAL) {
LOG.debug("Flush thread woke up because memory above low water="
@@ -332,7 +390,7 @@ class MemStoreFlusher implements FlushRequester {
private void wakeupFlushThread() {
if (wakeupPending.compareAndSet(false, true)) {
- flushQueue.add(new WakeupFlushThread());
+ flushQueue.add(WAKEUPFLUSH_INSTANCE);
}
}
@@ -760,21 +818,6 @@ class MemStoreFlusher implements FlushRequester {
}
/**
- * Token to insert into the flush queue that ensures that the flusher does not sleep
- */
- static class WakeupFlushThread implements FlushQueueEntry {
- @Override
- public long getDelay(TimeUnit unit) {
- return 0;
- }
-
- @Override
- public int compareTo(Delayed o) {
- return -1;
- }
- }
-
- /**
* Datastructure used in the flush queue. Holds region and retry count.
* Keeps tabs on how old this object is. Implements {@link Delayed}. On
* construction, the delay is zero. When added to a delay queue, we'll come
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 88ce346..7e01c9a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1207,7 +1207,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
throw new IllegalArgumentException("Failed resolve of " + initialIsa);
}
priority = createPriority();
- String hostname = initialIsa.getHostName();
// Using Address means we don't get the IP too. Shorten it more even to just the host name
// w/o the domain.
String name = rs.getProcessName() + "/" +
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 5ef579b..f3c93dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -1206,6 +1206,8 @@ public class RegionCoprocessorHost
* @return true or false to return to client if default processing should be bypassed,
* or null otherwise
*/
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_BOOLEAN_RETURN_NULL",
+ justification="TODO: Fix")
public Boolean preCheckAndDeleteAfterRowLock(final byte[] row, final byte[] family,
final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator,
final Delete delete) throws IOException {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index d22d1ec..e34818f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKeyImpl;
import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -651,11 +652,12 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
@Override
protected void doReplaceWriter(Path oldPath, Path newPath, AsyncWriter nextWriter)
throws IOException {
+ Preconditions.checkNotNull(nextWriter);
waitForSafePoint();
long oldFileLen = closeWriter();
logRollAndSetupWalProps(oldPath, newPath, oldFileLen);
this.writer = nextWriter;
- if (nextWriter != null && nextWriter instanceof AsyncProtobufLogWriter) {
+ if (nextWriter instanceof AsyncProtobufLogWriter) {
this.fsOut = ((AsyncProtobufLogWriter) nextWriter).getOutput();
}
this.fileLengthAtLastSync = nextWriter.getLength();
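Preconditions.checkNotNull makes the non-null contract explicit at the top of the method; with it in place, the old "nextWriter != null &&" clause is dead weight, since instanceof already evaluates to false for null. That redundant-null-check pattern is exactly what findbugs was reporting.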
http://git-wip-us.apache.org/repos/asf/hbase/blob/b11e5066/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
index c3372bb..00c788d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
@@ -336,7 +336,12 @@ public class MajorCompactor {
"ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: "
+ parseException);
printUsage(options);
-
+ return;
+ }
+ if (commandLine == null) {
+ System.out.println("ERROR: Failed parse, empty commandLine; " + Arrays.toString(args));
+ printUsage(options);
+ return;
}
String tableName = commandLine.getOptionValue("table");
String cf = commandLine.getOptionValue("cf", null);
@@ -353,7 +358,7 @@ public class MajorCompactor {
String quorum =
commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM));
String rootDir = commandLine.getOptionValue("rootDir", configuration.get(HConstants.HBASE_DIR));
- long sleep = Long.valueOf(commandLine.getOptionValue("sleep", Long.toString(30000)));
+ long sleep = Long.parseLong(commandLine.getOptionValue("sleep", Long.toString(30000)));
configuration.set(HConstants.HBASE_DIR, rootDir);
configuration.set(HConstants.ZOOKEEPER_QUORUM, quorum);
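Two distinct fixes here: the added returns close a path where a parse failure printed usage and then fell through to dereference a null commandLine, and Long.parseLong replaces Long.valueOf so the sleep interval is parsed straight to a primitive instead of boxing a Long only to unbox it.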
[16/20] hbase git commit: HBASE-20074 [FindBugs] Same code on both
branches in CompactingMemStore#initMemStoreCompactor
Posted by bu...@apache.org.
HBASE-20074 [FindBugs] Same code on both branches in CompactingMemStore#initMemStoreCompactor
Signed-off-by: Michael Stack <st...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0fa5d69f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0fa5d69f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0fa5d69f
Branch: refs/heads/HBASE-15151
Commit: 0fa5d69fc7602f2760a64e1760fd02be130aa8bd
Parents: e4ce38d
Author: gsheffi <gs...@yahoo-inc.com>
Authored: Mon Feb 26 11:18:38 2018 +0200
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Feb 27 11:21:40 2018 -0600
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/CompactingMemStore.java | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0fa5d69f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 44b40eb..d60b049 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -136,14 +136,10 @@ public class CompactingMemStore extends AbstractMemStore {
numStores = 1;
}
inmemoryFlushSize = memstoreFlushSize / numStores;
- // multiply by a factor (different factors for different index types)
- if (indexType == IndexType.ARRAY_MAP) {
- factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
+ // multiply by a factor (the same factor for all index types)
+ factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
- } else {
- factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
- IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
- }
+
inmemoryFlushSize = (long) (inmemoryFlushSize * factor);
LOG.info("Setting in-memory flush size threshold to {} and immutable segments index to type={}",
StringUtils.byteDesc(inmemoryFlushSize), indexType);
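The if/else arms read the same configuration key with the same default, so findbugs flags them as identical branches; collapsing them into a single conf.getDouble call preserves behavior, and the comment is updated to match (one factor for all index types).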
[07/20] hbase git commit: for creating patch HBASE-20074-V01.patch
Posted by bu...@apache.org.
for creating patch HBASE-20074-V01.patch
Signed-off-by: Michael Stack <st...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/73028d5b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/73028d5b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/73028d5b
Branch: refs/heads/HBASE-15151
Commit: 73028d5bd9f85655b284654579ddcbbca31e41e8
Parents: 7cfb464
Author: gsheffi <gs...@yahoo-inc.com>
Authored: Mon Feb 26 11:18:38 2018 +0200
Committer: Michael Stack <st...@apache.org>
Committed: Mon Feb 26 09:57:22 2018 -0800
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/CompactingMemStore.java | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/73028d5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 44b40eb..d60b049 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -136,14 +136,10 @@ public class CompactingMemStore extends AbstractMemStore {
numStores = 1;
}
inmemoryFlushSize = memstoreFlushSize / numStores;
- // multiply by a factor (different factors for different index types)
- if (indexType == IndexType.ARRAY_MAP) {
- factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
+ // multiply by a factor (the same factor for all index types)
+ factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
- } else {
- factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
- IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
- }
+
inmemoryFlushSize = (long) (inmemoryFlushSize * factor);
LOG.info("Setting in-memory flush size threshold to {} and immutable segments index to type={}",
StringUtils.byteDesc(inmemoryFlushSize), indexType);
[19/20] hbase git commit: HBASE-15151 ensure findbugs check runs on
all branches.
Posted by bu...@apache.org.
HBASE-15151 ensure findbugs check runs on all branches.
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f3bedc7a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f3bedc7a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f3bedc7a
Branch: refs/heads/HBASE-15151
Commit: f3bedc7a700e5fe1eac216c628c3adc884debf2f
Parents: dd7c231
Author: Sean Busbey <bu...@apache.org>
Authored: Sun Feb 25 00:35:45 2018 -0600
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Feb 27 11:28:24 2018 -0600
----------------------------------------------------------------------
dev-support/Jenkinsfile | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/f3bedc7a/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 22f3f21..6e37c70 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -239,7 +239,7 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
}
}
environment {
- TESTS = 'mvninstall,compile,javac,unit,htmlout'
+ TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
// This isn't strictly needed on branches that only support jdk8, but doesn't hurt
@@ -305,10 +305,7 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
}
}
environment {
- // Failure in any stage fails the build and consecutive stages are not built.
- // Findbugs is part of this last yetus stage to prevent findbugs precluding hadoop3
- // tests.
- TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
+ TESTS = 'mvninstall,compile,javac,unit,htmlout'
OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
// This isn't strictly needed on branches that only support jdk8, but doesn't hurt
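The net effect is to move findbugs from the hadoop3 stage's TESTS list to the hadoop2 stage's. Presumably every branch runs the hadoop2 stage while only some build against hadoop3, so this relocation is what makes the check run on all branches, per the commit subject; the removed comment about findbugs precluding hadoop3 tests no longer applies once the two stages are decoupled.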
[13/20] hbase git commit: HBASE-20089 Use the ASF recommended naming
for SHA512 xsum files
Posted by bu...@apache.org.
HBASE-20089 Use the ASF recommended naming for SHA512 xsum files
Signed-off-by: Andrew Purtell <ap...@apache.org>
Signed-off-by: Michael Stack <st...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e47d1e44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e47d1e44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e47d1e44
Branch: refs/heads/HBASE-15151
Commit: e47d1e443e17408cf2131e4490397a7cee18288e
Parents: dbd8013
Author: Josh Elser <el...@apache.org>
Authored: Mon Feb 26 16:30:16 2018 -0500
Committer: Josh Elser <el...@apache.org>
Committed: Tue Feb 27 09:51:10 2018 -0500
----------------------------------------------------------------------
dev-support/make_rc.sh | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/e47d1e44/dev-support/make_rc.sh
----------------------------------------------------------------------
diff --git a/dev-support/make_rc.sh b/dev-support/make_rc.sh
index f067ee9..640bcd1 100755
--- a/dev-support/make_rc.sh
+++ b/dev-support/make_rc.sh
@@ -107,13 +107,13 @@ build_bin
MAVEN_OPTS="${mvnopts}" ${mvn} deploy -DskipTests -Papache-release -Prelease \
-Dmaven.repo.local=${output_dir}/repository
-# Do sha1 and md5
+# Do sha512 and md5
cd ${output_dir}
-for i in *.tar.gz; do echo $i; gpg --print-md SHA512 $i > $i.sha ; done
+for i in *.tar.gz; do echo $i; gpg --print-md SHA512 $i > $i.sha512 ; done
for i in *.tar.gz; do echo $i; gpg --print-md MD5 $i > $i.md5 ; done
echo "Check the content of ${output_dir}. If good, sign and push to dist.apache.org"
echo " cd ${output_dir}"
echo ' for i in *.tar.gz; do echo $i; gpg --armor --output $i.asc --detach-sig $i ; done'
-echo ' rsync -av ${output_dir}/*.gz ${output_dir}/*.md5 ${output_dir}/*.sha ${output_dir}/*.asc ${APACHE_HBASE_DIST_DEV_DIR}/${hbase_name}/'
+echo ' rsync -av ${output_dir}/*.gz ${output_dir}/*.md5 ${output_dir}/*.sha512 ${output_dir}/*.asc ${APACHE_HBASE_DIST_DEV_DIR}/${hbase_name}/'
echo "Check the content deployed to maven. If good, close the repo and record links of temporary staging repo"
[05/20] hbase git commit: HBASE-19974 Fix decommissioned servers
cannot be removed by remove_servers_rsgroup methods
Posted by bu...@apache.org.
HBASE-19974 Fix decommissioned servers cannot be removed by remove_servers_rsgroup methods
Signed-off-by: tedyu <yu...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a29b3caf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a29b3caf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a29b3caf
Branch: refs/heads/HBASE-15151
Commit: a29b3caf4dbc7b8833474ef5da5438f7f6907e00
Parents: 2beda62
Author: haxiaolin <ha...@xiaomi.com>
Authored: Mon Feb 26 14:25:01 2018 +0800
Committer: tedyu <yu...@gmail.com>
Committed: Mon Feb 26 07:30:56 2018 -0800
----------------------------------------------------------------------
.../hbase/rsgroup/RSGroupAdminServer.java | 8 +++-
.../hadoop/hbase/rsgroup/TestRSGroups.java | 7 ++-
.../hadoop/hbase/rsgroup/TestRSGroupsBase.java | 46 ++++++++++++--------
3 files changed, 40 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/a29b3caf/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index aba57fe..094fc1d 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -675,8 +675,12 @@ public class RSGroupAdminServer implements RSGroupAdmin {
private void checkForDeadOrOnlineServers(Set<Address> servers) throws ConstraintException {
// This uglyness is because we only have Address, not ServerName.
Set<Address> onlineServers = new HashSet<>();
- for(ServerName server: master.getServerManager().getOnlineServers().keySet()) {
- onlineServers.add(server.getAddress());
+ List<ServerName> drainingServers = master.getServerManager().getDrainingServersList();
+ for (ServerName server : master.getServerManager().getOnlineServers().keySet()) {
+ // Only online but not decommissioned servers are really online
+ if (!drainingServers.contains(server)) {
+ onlineServers.add(server.getAddress());
+ }
}
Set<Address> deadServers = new HashSet<>();
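The fix narrows what counts as online: a decommissioned (draining) region server still appears in getOnlineServers(), so before this change checkForDeadOrOnlineServers rejected its removal as though it were still serving regions. Subtracting the draining list is what lets remove_servers_rsgroup drop decommissioned hosts, the exact scenario HBASE-19974 describes.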
http://git-wip-us.apache.org/repos/asf/hbase/blob/a29b3caf/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
index 9116f3b..610278a 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.net.Address;
@@ -66,7 +65,6 @@ public class TestRSGroups extends TestRSGroupsBase {
HBaseClassTestRule.forClass(TestRSGroups.class);
protected static final Logger LOG = LoggerFactory.getLogger(TestRSGroups.class);
- private static HMaster master;
private static boolean INIT = false;
private static RSGroupAdminEndpoint rsGroupAdminEndpoint;
@@ -126,6 +124,11 @@ public class TestRSGroups extends TestRSGroupsBase {
deleteNamespaceIfNecessary();
deleteGroups();
+ for(ServerName sn : admin.listDecommissionedRegionServers()){
+ admin.recommissionRegionServer(sn, null);
+ }
+ assertTrue(admin.listDecommissionedRegionServers().isEmpty());
+
int missing = NUM_SLAVES_BASE - getNumServers();
LOG.info("Restoring servers: "+missing);
for(int i=0; i<missing; i++) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/a29b3caf/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index c3f7eef..76bcd20 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -25,8 +25,10 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.SecureRandom;
+import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashSet;
+import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -81,9 +83,11 @@ public abstract class TestRSGroupsBase {
protected static Admin admin;
protected static HBaseCluster cluster;
protected static RSGroupAdmin rsGroupAdmin;
+ protected static HMaster master;
public final static long WAIT_TIMEOUT = 60000*5;
public final static int NUM_SLAVES_BASE = 4; //number of slaves for the smallest cluster
+ public static int NUM_DEAD_SERVERS = 0;
// Per test variables
TableName tableName;
@@ -271,10 +275,10 @@ public abstract class TestRSGroupsBase {
public int getNumServers() throws IOException {
ClusterMetrics status =
admin.getClusterMetrics(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS));
- ServerName master = status.getMasterName();
+ ServerName masterName = status.getMasterName();
int count = 0;
for (ServerName sn : status.getLiveServerMetrics().keySet()) {
- if (!sn.equals(master)) {
+ if (!sn.equals(masterName)) {
count++;
}
}
@@ -885,6 +889,7 @@ public abstract class TestRSGroupsBase {
public void testClearDeadServers() throws Exception {
LOG.info("testClearDeadServers");
final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 3);
+ NUM_DEAD_SERVERS = cluster.getClusterMetrics().getDeadServerNames().size();
ServerName targetServer = ServerName.parseServerName(
newGroup.getServers().iterator().next().toString());
@@ -897,15 +902,15 @@ public abstract class TestRSGroupsBase {
//due to the connection loss
targetRS.stopServer(null,
AdminProtos.StopServerRequest.newBuilder().setReason("Die").build());
+ NUM_DEAD_SERVERS ++;
} catch(Exception e) {
}
- HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
//wait for stopped regionserver to dead server list
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return !master.getServerManager().areDeadServersInProgress()
- && cluster.getClusterMetrics().getDeadServerNames().size() > 0;
+ && cluster.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS;
}
});
assertFalse(cluster.getClusterMetrics().getLiveServerMetrics().containsKey(targetServer));
@@ -925,8 +930,10 @@ public abstract class TestRSGroupsBase {
public void testRemoveServers() throws Exception {
LOG.info("testRemoveServers");
final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 3);
- ServerName targetServer = ServerName.parseServerName(
- newGroup.getServers().iterator().next().toString());
+ Iterator<Address> iterator = newGroup.getServers().iterator();
+ ServerName targetServer = ServerName.parseServerName(iterator.next().toString());
+
+ // remove online servers
try {
rsGroupAdmin.removeServers(Sets.newHashSet(targetServer.getAddress()));
fail("Online servers shouldn't have been successfully removed.");
@@ -938,6 +945,8 @@ public abstract class TestRSGroupsBase {
}
assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
+ // remove dead servers
+ NUM_DEAD_SERVERS = cluster.getClusterMetrics().getDeadServerNames().size();
AdminProtos.AdminService.BlockingInterface targetRS =
((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
try {
@@ -945,18 +954,19 @@ public abstract class TestRSGroupsBase {
GetServerInfoRequest.newBuilder().build()).getServerInfo().getServerName());
//stopping may cause an exception
//due to the connection loss
+ LOG.info("stopping server " + targetServer.getHostAndPort());
targetRS.stopServer(null,
AdminProtos.StopServerRequest.newBuilder().setReason("Die").build());
+ NUM_DEAD_SERVERS ++;
} catch(Exception e) {
}
- HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
//wait for stopped regionserver to dead server list
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return !master.getServerManager().areDeadServersInProgress()
- && cluster.getClusterMetrics().getDeadServerNames().size() > 0;
+ && cluster.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS;
}
});
@@ -971,17 +981,19 @@ public abstract class TestRSGroupsBase {
}
assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
- ServerName sn = TEST_UTIL.getHBaseClusterInterface().getClusterMetrics().getMasterName();
- TEST_UTIL.getHBaseClusterInterface().stopMaster(sn);
- TEST_UTIL.getHBaseClusterInterface().waitForMasterToStop(sn, 60000);
- TEST_UTIL.getHBaseClusterInterface().startMaster(sn.getHostname(), 0);
- TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster(60000);
+ // remove decommissioned servers
+ List<ServerName> serversToDecommission = new ArrayList<>();
+ targetServer = ServerName.parseServerName(iterator.next().toString());
+ targetRS = ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
+ targetServer = ProtobufUtil.toServerName(targetRS.getServerInfo(null,
+ GetServerInfoRequest.newBuilder().build()).getServerInfo().getServerName());
+ assertTrue(master.getServerManager().getOnlineServers().containsKey(targetServer));
+ serversToDecommission.add(targetServer);
- assertEquals(3, cluster.getClusterMetrics().getLiveServerMetrics().size());
- assertFalse(cluster.getClusterMetrics().getLiveServerMetrics().containsKey(targetServer));
- assertFalse(cluster.getClusterMetrics().getDeadServerNames().contains(targetServer));
- assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
+ admin.decommissionRegionServers(serversToDecommission, true);
+ assertEquals(1, admin.listDecommissionedRegionServers().size());
+ assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
rsGroupAdmin.removeServers(Sets.newHashSet(targetServer.getAddress()));
Set<Address> newGroupServers = rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getServers();
assertFalse(newGroupServers.contains(targetServer.getAddress()));
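The rewritten testRemoveServers exercises all three server states in sequence (online, dead, decommissioned) instead of restarting the master, and the NUM_DEAD_SERVERS counter makes the dead-server waits exact rather than "greater than zero", which presumably guards against counts left over from earlier tests in the same cluster. TestRSGroups' cleanup also recommissions any servers a failed run leaves decommissioned.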
[11/20] hbase git commit: HBASE-20066 Region sequence id may go
backward after split or merge
Posted by bu...@apache.org.
HBASE-20066 Region sequence id may go backward after split or merge
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f06a89b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f06a89b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f06a89b5
Branch: refs/heads/HBASE-15151
Commit: f06a89b5317a8e3d4fb220eb462b083bc6e0b5c8
Parents: d272ac9
Author: zhangduo <zh...@apache.org>
Authored: Mon Feb 26 20:22:57 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Feb 27 15:33:07 2018 +0800
----------------------------------------------------------------------
.../src/main/protobuf/MasterProcedure.proto | 22 ++-
.../assignment/MergeTableRegionsProcedure.java | 181 +++++++++----------
.../assignment/SplitTableRegionProcedure.java | 150 +++++++--------
.../AbstractStateMachineTableProcedure.java | 14 +-
.../hadoop/hbase/regionserver/HRegion.java | 18 +-
.../apache/hadoop/hbase/wal/WALSplitter.java | 109 ++++++-----
.../TestSequenceIdMonotonicallyIncreasing.java | 156 ++++++++++++++++
.../hadoop/hbase/master/AbstractTestDLS.java | 36 ----
.../hbase/wal/TestReadWriteSeqIdFiles.java | 95 ++++++++++
9 files changed, 504 insertions(+), 277 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 1ab51e5..e785f96 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -241,11 +241,12 @@ enum SplitTableRegionState {
SPLIT_TABLE_REGION_PRE_OPERATION = 2;
SPLIT_TABLE_REGION_CLOSE_PARENT_REGION = 3;
SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 4;
- SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META = 5;
- SPLIT_TABLE_REGION_UPDATE_META = 6;
- SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META = 7;
- SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 8;
- SPLIT_TABLE_REGION_POST_OPERATION = 9;
+ SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE = 5;
+ SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META = 6;
+ SPLIT_TABLE_REGION_UPDATE_META = 7;
+ SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META = 8;
+ SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9;
+ SPLIT_TABLE_REGION_POST_OPERATION = 10;
}
message SplitTableRegionStateData {
@@ -260,11 +261,12 @@ enum MergeTableRegionsState {
MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION = 3;
MERGE_TABLE_REGIONS_CLOSE_REGIONS = 4;
MERGE_TABLE_REGIONS_CREATE_MERGED_REGION = 5;
- MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 6;
- MERGE_TABLE_REGIONS_UPDATE_META = 7;
- MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 8;
- MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 9;
- MERGE_TABLE_REGIONS_POST_OPERATION = 10;
+ MERGE_TABLE_REGIONS_WRITE_MAX_SEQUENCE_ID_FILE = 6;
+ MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 7;
+ MERGE_TABLE_REGIONS_UPDATE_META = 8;
+ MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 9;
+ MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 10;
+ MERGE_TABLE_REGIONS_POST_OPERATION = 11;
}
message MergeTableRegionsStateData {
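One caveat worth noting on the proto change: inserting the new WRITE_MAX_SEQUENCE_ID_FILE states renumbers every subsequent value in both enums. These enums are persisted as procedure state, so the renumbering assumes no procedure serialized under the old numbering will be replayed by the new code; that is the standing trade-off when editing persisted protobuf enums in place.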
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 1c448dc..7c041e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -204,55 +205,55 @@ public class MergeTableRegionsProcedure
}
@Override
- protected Flow executeFromState(
- final MasterProcedureEnv env,
- final MergeTableRegionsState state)
- throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
- if (LOG.isDebugEnabled()) {
- LOG.debug(this + " execute state=" + state);
- }
+ protected Flow executeFromState(final MasterProcedureEnv env, final MergeTableRegionsState state)
+ throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
+ LOG.trace("{} execute state={}", this, state);
try {
switch (state) {
- case MERGE_TABLE_REGIONS_PREPARE:
- if (!prepareMergeRegion(env)) {
- assert isFailed() : "Merge region should have an exception here";
+ case MERGE_TABLE_REGIONS_PREPARE:
+ if (!prepareMergeRegion(env)) {
+ assert isFailed() : "Merge region should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION);
+ break;
+ case MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION:
+ preMergeRegions(env);
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_CLOSE_REGIONS);
+ break;
+ case MERGE_TABLE_REGIONS_CLOSE_REGIONS:
+ addChildProcedure(createUnassignProcedures(env, getRegionReplication(env)));
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_CREATE_MERGED_REGION);
+ break;
+ case MERGE_TABLE_REGIONS_CREATE_MERGED_REGION:
+ createMergedRegion(env);
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_WRITE_MAX_SEQUENCE_ID_FILE);
+ break;
+ case MERGE_TABLE_REGIONS_WRITE_MAX_SEQUENCE_ID_FILE:
+ writeMaxSequenceIdFile(env);
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION);
+ break;
+ case MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION:
+ preMergeRegionsCommit(env);
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_UPDATE_META);
+ break;
+ case MERGE_TABLE_REGIONS_UPDATE_META:
+ updateMetaForMergedRegions(env);
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION);
+ break;
+ case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
+ postMergeRegionsCommit(env);
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_OPEN_MERGED_REGION);
+ break;
+ case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
+ addChildProcedure(createAssignProcedures(env, getRegionReplication(env)));
+ setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_POST_OPERATION);
+ break;
+ case MERGE_TABLE_REGIONS_POST_OPERATION:
+ postCompletedMergeRegions(env);
return Flow.NO_MORE_STATE;
- }
- setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION);
- break;
- case MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION:
- preMergeRegions(env);
- setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_CLOSE_REGIONS);
- break;
- case MERGE_TABLE_REGIONS_CLOSE_REGIONS:
- addChildProcedure(createUnassignProcedures(env, getRegionReplication(env)));
- setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_CREATE_MERGED_REGION);
- break;
- case MERGE_TABLE_REGIONS_CREATE_MERGED_REGION:
- createMergedRegion(env);
- setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION);
- break;
- case MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION:
- preMergeRegionsCommit(env);
- setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_UPDATE_META);
- break;
- case MERGE_TABLE_REGIONS_UPDATE_META:
- updateMetaForMergedRegions(env);
- setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION);
- break;
- case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
- postMergeRegionsCommit(env);
- setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_OPEN_MERGED_REGION);
- break;
- case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
- addChildProcedure(createAssignProcedures(env, getRegionReplication(env)));
- setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_POST_OPERATION);
- break;
- case MERGE_TABLE_REGIONS_POST_OPERATION:
- postCompletedMergeRegions(env);
- return Flow.NO_MORE_STATE;
- default:
- throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
}
} catch (IOException e) {
String msg = "Error trying to merge regions " +
@@ -285,31 +286,32 @@ public class MergeTableRegionsProcedure
try {
switch (state) {
- case MERGE_TABLE_REGIONS_POST_OPERATION:
- case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
- case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
- case MERGE_TABLE_REGIONS_UPDATE_META:
- String msg = this + " We are in the " + state + " state."
- + " It is complicated to rollback the merge operation that region server is working on."
- + " Rollback is not supported and we should let the merge operation to complete";
- LOG.warn(msg);
- // PONR
- throw new UnsupportedOperationException(this + " unhandled state=" + state);
- case MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION:
- break;
- case MERGE_TABLE_REGIONS_CREATE_MERGED_REGION:
- cleanupMergedRegion(env);
- break;
- case MERGE_TABLE_REGIONS_CLOSE_REGIONS:
- rollbackCloseRegionsForMerge(env);
- break;
- case MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION:
- postRollBackMergeRegions(env);
- break;
- case MERGE_TABLE_REGIONS_PREPARE:
- break;
- default:
- throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ case MERGE_TABLE_REGIONS_POST_OPERATION:
+ case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
+ case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
+ case MERGE_TABLE_REGIONS_UPDATE_META:
+ String msg = this + " We are in the " + state + " state." +
+ " It is complicated to roll back the merge operation that the region server is working on." +
+ " Rollback is not supported and we should let the merge operation complete";
+ LOG.warn(msg);
+ // PONR
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ case MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION:
+ break;
+ case MERGE_TABLE_REGIONS_CREATE_MERGED_REGION:
+ case MERGE_TABLE_REGIONS_WRITE_MAX_SEQUENCE_ID_FILE:
+ cleanupMergedRegion(env);
+ break;
+ case MERGE_TABLE_REGIONS_CLOSE_REGIONS:
+ rollbackCloseRegionsForMerge(env);
+ break;
+ case MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION:
+ postRollBackMergeRegions(env);
+ break;
+ case MERGE_TABLE_REGIONS_PREPARE:
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
}
} catch (Exception e) {
// This will be retried. Unless there is a bug in the code,
@@ -326,10 +328,10 @@ public class MergeTableRegionsProcedure
@Override
protected boolean isRollbackSupported(final MergeTableRegionsState state) {
switch (state) {
- case MERGE_TABLE_REGIONS_POST_OPERATION:
- case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
- case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
- case MERGE_TABLE_REGIONS_UPDATE_META:
+ case MERGE_TABLE_REGIONS_POST_OPERATION:
+ case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
+ case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
+ case MERGE_TABLE_REGIONS_UPDATE_META:
// It is not safe to rollback if we reach to these states.
return false;
default:
@@ -562,7 +564,6 @@ public class MergeTableRegionsProcedure
/**
* Set the region states to MERGING state
* @param env MasterProcedureEnv
- * @throws IOException
*/
public void setRegionStateToMerging(final MasterProcedureEnv env) throws IOException {
// Set State.MERGING to regions to be merged
@@ -572,22 +573,8 @@ public class MergeTableRegionsProcedure
}
/**
- * Rollback the region state change
- * Not used for now, since rollbackCloseRegionsForMerge() will mark regions as OPEN
- * @param env MasterProcedureEnv
- * @throws IOException
- */
- private void setRegionStateBackToOpen(final MasterProcedureEnv env) throws IOException {
- // revert region state to Open
- RegionStates regionStates = env.getAssignmentManager().getRegionStates();
- regionStates.getRegionStateNode(regionsToMerge[0]).setState(State.OPEN);
- regionStates.getRegionStateNode(regionsToMerge[1]).setState(State.OPEN);
- }
-
- /**
* Create merged region
* @param env MasterProcedureEnv
- * @throws IOException
*/
private void createMergedRegion(final MasterProcedureEnv env) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
@@ -614,7 +601,6 @@ public class MergeTableRegionsProcedure
* @param env MasterProcedureEnv
* @param regionFs region file system
* @param mergedDir the temp directory of merged region
- * @throws IOException
*/
private void mergeStoreFiles(
final MasterProcedureEnv env, final HRegionFileSystem regionFs, final Path mergedDir)
@@ -642,7 +628,6 @@ public class MergeTableRegionsProcedure
/**
* Clean up merged region
* @param env MasterProcedureEnv
- * @throws IOException
*/
private void cleanupMergedRegion(final MasterProcedureEnv env) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
@@ -777,6 +762,18 @@ public class MergeTableRegionsProcedure
return regionLocation;
}
+ private void writeMaxSequenceIdFile(MasterProcedureEnv env) throws IOException {
+ FileSystem fs = env.getMasterServices().getMasterFileSystem().getFileSystem();
+ long maxSequenceId = -1L;
+ for (RegionInfo region : regionsToMerge) {
+ maxSequenceId =
+ Math.max(maxSequenceId, WALSplitter.getMaxRegionSequenceId(fs, getRegionDir(env, region)));
+ }
+ if (maxSequenceId > 0) {
+ WALSplitter.writeRegionSequenceIdFile(fs, getRegionDir(env, mergedRegion), maxSequenceId);
+ }
+ }
+
/**
* The procedure could be restarted from a different machine. If the variable is null, we need to
* retrieve it.
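The new MERGE_TABLE_REGIONS_WRITE_MAX_SEQUENCE_ID_FILE step above is the core of this change: before the merged region opens, the master records the larger of the two parents' max sequence ids under the merged region's directory. Below is a minimal sketch of that bookkeeping, assuming a local FileSystem and hypothetical region paths; only the WALSplitter calls mirror the API introduced by this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class MergeSeqIdSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Hypothetical directories; the procedure derives the real ones via getRegionDir(env, region).
    Path parentA = new Path("/tmp/table/parentA");
    Path parentB = new Path("/tmp/table/parentB");
    Path merged = new Path("/tmp/table/merged");
    // getMaxRegionSequenceId returns -1 when a parent has no sequence id file yet.
    long maxSequenceId = Math.max(WALSplitter.getMaxRegionSequenceId(fs, parentA),
      WALSplitter.getMaxRegionSequenceId(fs, parentB));
    if (maxSequenceId > 0) {
      // Record it under the merged region so the merged region opens above both parents.
      WALSplitter.writeRegionSequenceIdFile(fs, merged, maxSequenceId);
    }
  }
}

Note the matching rollback change above: MERGE_TABLE_REGIONS_WRITE_MAX_SEQUENCE_ID_FILE now falls through to cleanupMergedRegion(), the same cleanup as the create-merged-region step.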
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index cabccbc..70ddbe5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -214,52 +215,54 @@ public class SplitTableRegionProcedure
@Override
protected Flow executeFromState(final MasterProcedureEnv env, final SplitTableRegionState state)
throws InterruptedException {
- if (isTraceEnabled()) {
- LOG.trace(this + " execute state=" + state);
- }
+ LOG.trace("{} execute state={}", this, state);
try {
switch (state) {
- case SPLIT_TABLE_REGION_PREPARE:
- if (prepareSplitRegion(env)) {
- setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION);
+ case SPLIT_TABLE_REGION_PREPARE:
+ if (prepareSplitRegion(env)) {
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION);
+ break;
+ } else {
+ return Flow.NO_MORE_STATE;
+ }
+ case SPLIT_TABLE_REGION_PRE_OPERATION:
+ preSplitRegion(env);
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CLOSE_PARENT_REGION);
+ break;
+ case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
+ addChildProcedure(createUnassignProcedures(env, getRegionReplication(env)));
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS);
+ break;
+ case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
+ createDaughterRegions(env);
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE);
+ break;
+ case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE:
+ writeMaxSequenceIdFile(env);
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META);
+ break;
+ case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
+ preSplitRegionBeforeMETA(env);
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_UPDATE_META);
break;
- } else {
+ case SPLIT_TABLE_REGION_UPDATE_META:
+ updateMetaForDaughterRegions(env);
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META);
+ break;
+ case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
+ preSplitRegionAfterMETA(env);
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS);
+ break;
+ case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
+ addChildProcedure(createAssignProcedures(env, getRegionReplication(env)));
+ setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_POST_OPERATION);
+ break;
+ case SPLIT_TABLE_REGION_POST_OPERATION:
+ postSplitRegion(env);
return Flow.NO_MORE_STATE;
- }
- case SPLIT_TABLE_REGION_PRE_OPERATION:
- preSplitRegion(env);
- setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CLOSE_PARENT_REGION);
- break;
- case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
- addChildProcedure(createUnassignProcedures(env, getRegionReplication(env)));
- setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS);
- break;
- case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
- createDaughterRegions(env);
- setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META);
- break;
- case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
- preSplitRegionBeforeMETA(env);
- setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_UPDATE_META);
- break;
- case SPLIT_TABLE_REGION_UPDATE_META:
- updateMetaForDaughterRegions(env);
- setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META);
- break;
- case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
- preSplitRegionAfterMETA(env);
- setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS);
- break;
- case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
- addChildProcedure(createAssignProcedures(env, getRegionReplication(env)));
- setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_POST_OPERATION);
- break;
- case SPLIT_TABLE_REGION_POST_OPERATION:
- postSplitRegion(env);
- return Flow.NO_MORE_STATE;
- default:
- throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
}
} catch (IOException e) {
String msg = "Error trying to split region " + getParentRegion().getEncodedName() +
@@ -291,27 +294,28 @@ public class SplitTableRegionProcedure
try {
switch (state) {
- case SPLIT_TABLE_REGION_POST_OPERATION:
- case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
- case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
- case SPLIT_TABLE_REGION_UPDATE_META:
- // PONR
- throw new UnsupportedOperationException(this + " unhandled state=" + state);
- case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
- break;
- case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
- // Doing nothing, as re-open parent region would clean up daughter region directories.
- break;
- case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
- openParentRegion(env);
- break;
- case SPLIT_TABLE_REGION_PRE_OPERATION:
- postRollBackSplitRegion(env);
- break;
- case SPLIT_TABLE_REGION_PREPARE:
- break; // nothing to do
- default:
- throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ case SPLIT_TABLE_REGION_POST_OPERATION:
+ case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
+ case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
+ case SPLIT_TABLE_REGION_UPDATE_META:
+ // PONR
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
+ break;
+ case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
+ case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE:
+ // Doing nothing, as re-open parent region would clean up daughter region directories.
+ break;
+ case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
+ openParentRegion(env);
+ break;
+ case SPLIT_TABLE_REGION_PRE_OPERATION:
+ postRollBackSplitRegion(env);
+ break;
+ case SPLIT_TABLE_REGION_PREPARE:
+ break; // nothing to do
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
}
} catch (IOException e) {
// This will be retried. Unless there is a bug in the code,
@@ -415,11 +419,11 @@ public class SplitTableRegionProcedure
return daughter_2_RI.getStartKey();
}
- private static State [] EXPECTED_SPLIT_STATES = new State [] {State.OPEN, State.CLOSED};
+ private static final State[] EXPECTED_SPLIT_STATES = new State[] { State.OPEN, State.CLOSED };
+
/**
* Prepare to Split region.
* @param env MasterProcedureEnv
- * @throws IOException
*/
@VisibleForTesting
public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOException {
@@ -475,8 +479,6 @@ public class SplitTableRegionProcedure
/**
* Action before splitting region in a table.
* @param env MasterProcedureEnv
- * @throws IOException
- * @throws InterruptedException
*/
private void preSplitRegion(final MasterProcedureEnv env)
throws IOException, InterruptedException {
@@ -499,7 +501,6 @@ public class SplitTableRegionProcedure
/**
* Action after rollback a split table region action.
* @param env MasterProcedureEnv
- * @throws IOException
*/
private void postRollBackSplitRegion(final MasterProcedureEnv env) throws IOException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
@@ -511,7 +512,7 @@ public class SplitTableRegionProcedure
/**
* Rollback close parent region
* @param env MasterProcedureEnv
- **/
+ */
private void openParentRegion(final MasterProcedureEnv env) throws IOException {
// Check whether the region is closed; if so, open it in the same server
final int regionReplication = getRegionReplication(env);
@@ -528,7 +529,6 @@ public class SplitTableRegionProcedure
/**
* Create daughter regions
* @param env MasterProcedureEnv
- * @throws IOException
*/
@VisibleForTesting
public void createDaughterRegions(final MasterProcedureEnv env) throws IOException {
@@ -558,7 +558,6 @@ public class SplitTableRegionProcedure
/**
* Create Split directory
* @param env MasterProcedureEnv
- * @throws IOException
*/
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env,
final HRegionFileSystem regionFs) throws IOException {
@@ -756,7 +755,6 @@ public class SplitTableRegionProcedure
/**
* Add daughter regions to META
* @param env MasterProcedureEnv
- * @throws IOException
*/
private void updateMetaForDaughterRegions(final MasterProcedureEnv env) throws IOException {
env.getAssignmentManager().markRegionAsSplit(getParentRegion(), getParentRegionServerName(env),
@@ -823,6 +821,16 @@ public class SplitTableRegionProcedure
return htd.getRegionReplication();
}
+ private void writeMaxSequenceIdFile(MasterProcedureEnv env) throws IOException {
+ FileSystem fs = env.getMasterServices().getMasterFileSystem().getFileSystem();
+ long maxSequenceId =
+ WALSplitter.getMaxRegionSequenceId(fs, getRegionDir(env, getParentRegion()));
+ if (maxSequenceId > 0) {
+ WALSplitter.writeRegionSequenceIdFile(fs, getRegionDir(env, daughter_1_RI), maxSequenceId);
+ WALSplitter.writeRegionSequenceIdFile(fs, getRegionDir(env, daughter_2_RI), maxSequenceId);
+ }
+ }
+
/**
* The procedure could be restarted from a different machine. If the variable is null, we need to
* retrieve it.
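The split path gets the same treatment: the new SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE step stamps the parent's max sequence id under both daughters before they open. A companion sketch, again with hypothetical local paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class SplitSeqIdSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path parent = new Path("/tmp/table/parent");       // hypothetical paths
    Path daughterA = new Path("/tmp/table/daughterA");
    Path daughterB = new Path("/tmp/table/daughterB");
    long maxSequenceId = WALSplitter.getMaxRegionSequenceId(fs, parent);
    if (maxSequenceId > 0) {
      // Both daughters receive the same id, so whichever opens first starts above the parent.
      WALSplitter.writeRegionSequenceIdFile(fs, daughterA, maxSequenceId);
      WALSplitter.writeRegionSequenceIdFile(fs, daughterB, maxSequenceId);
    }
  }
}

In rollback, the new state shares the create-daughter-regions branch: re-opening the parent cleans up the daughter directories, sequence id files included.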
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
index d67d9f9..833b659 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
@@ -19,13 +19,17 @@
package org.apache.hadoop.hbase.master.procedure;
import java.io.IOException;
-
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Base class for all the Table procedures that want to use a StateMachineProcedure.
@@ -114,4 +118,10 @@ public abstract class AbstractStateMachineTableProcedure<TState>
throw new TableNotFoundException(getTableName());
}
}
+
+ protected final Path getRegionDir(MasterProcedureEnv env, RegionInfo region) throws IOException {
+ MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), getTableName());
+ return new Path(tableDir, ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName());
+ }
}
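getRegionDir() gives both procedures a master-side way to resolve a region's directory. A sketch of the same path arithmetic with a hypothetical root dir; the getRegionInfoForFs() call maps a read replica back to its primary, since all replicas share the primary's on-disk directory.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;

public class RegionDirSketch {
  // Resolves <rootDir>/data/<namespace>/<table>/<encoded-name> for any replica of a region.
  static Path regionDir(Path rootDir, TableName table, RegionInfo region) {
    Path tableDir = FSUtils.getTableDir(rootDir, table);
    RegionInfo primary = ServerRegionReplicaUtil.getRegionInfoForFs(region);
    return new Path(tableDir, primary.getEncodedName());
  }
}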
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a64d6f1..9f3d9bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -947,15 +947,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// Use maximum of log sequenceid or that which was found in stores
// (particularly if no recovered edits, seqid will be -1).
- long nextSeqid = maxSeqId;
- if (this.writestate.writesEnabled) {
- nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
- this.fs.getRegionDir(), nextSeqid, 1);
- } else {
- nextSeqid++;
+ long maxSeqIdFromFile =
+ WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
+ long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
+ if (writestate.writesEnabled) {
+ WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), nextSeqId);
}
- LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqid);
+ LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
// A region can be reopened if failed a split; reset flags
this.closing.set(false);
@@ -967,7 +966,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
status.markComplete("Region opened successfully");
- return nextSeqid;
+ return nextSeqId;
}
/**
@@ -1103,7 +1102,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// table is still online
if (this.fs.getFileSystem().exists(this.fs.getRegionDir())) {
WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs.getRegionDir(),
- mvcc.getReadPoint(), 0);
+ mvcc.getReadPoint());
}
}
@@ -7014,7 +7013,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* Open HRegion.
* Calls initialize and sets sequenceId.
* @return Returns <code>this</code>
- * @throws IOException
*/
protected HRegion openHRegion(final CancelableProgressable reporter)
throws IOException {
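The HRegion hunk above replaces the old "safety bumper" with an explicit read of the sequence id file: the region now opens at one past the larger of what the stores report and what a prior split/merge step recorded. A worked example with hypothetical numbers:

public class NextSeqIdExample {
  public static void main(String[] args) {
    long maxSeqId = 1000L;         // max found in store files / recovered edits
    long maxSeqIdFromFile = 2000L; // max recorded by a split/merge step, -1 if absent
    long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
    System.out.println(nextSeqId); // 2001 -- strictly above both sources
  }
}

This is exactly the invariant the new TestSequenceIdMonotonicallyIncreasing below asserts: after a split or merge, the daughters' (or merged region's) open sequence number is the parents' WAL maximum plus one.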
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 3f64d75..6c77c4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -47,7 +46,6 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.ArrayUtils;
@@ -89,13 +87,14 @@ import org.apache.hadoop.hbase.wal.WALProvider.Writer;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
@@ -623,70 +622,70 @@ public class WALSplitter {
|| file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX);
}
- /**
- * Create a file with name as region open sequence id
- * @param fs
- * @param regiondir
- * @param newSeqId
- * @param safetyBumper
- * @return long new sequence Id value
- * @throws IOException
- */
- public static long writeRegionSequenceIdFile(final FileSystem fs, final Path regiondir,
- long newSeqId, long safetyBumper) throws IOException {
+ private static FileStatus[] getSequenceIdFiles(FileSystem fs, Path regionDir) throws IOException {
// TODO: Why are we using a method in here as part of our normal region open where
// there is no splitting involved? Fix. St.Ack 01/20/2017.
- Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
- long maxSeqId = 0;
- FileStatus[] files = null;
- if (fs.exists(editsdir)) {
- files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
- @Override
- public boolean accept(Path p) {
- return isSequenceIdFile(p);
- }
- });
- if (files != null) {
- for (FileStatus status : files) {
- String fileName = status.getPath().getName();
- try {
- long tmpSeqId =
- Long.parseLong(fileName.substring(0, fileName.length()
- - SEQUENCE_ID_FILE_SUFFIX_LENGTH));
- maxSeqId = Math.max(tmpSeqId, maxSeqId);
- } catch (NumberFormatException ex) {
- LOG.warn("Invalid SeqId File Name={}", fileName);
- }
- }
- }
+ Path editsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
+ try {
+ FileStatus[] files = fs.listStatus(editsDir, WALSplitter::isSequenceIdFile);
+ return files != null ? files : new FileStatus[0];
+ } catch (FileNotFoundException e) {
+ return new FileStatus[0];
}
- if (maxSeqId > newSeqId) {
- newSeqId = maxSeqId;
+ }
+
+ private static long getMaxSequenceId(FileStatus[] files) {
+ long maxSeqId = -1L;
+ for (FileStatus file : files) {
+ String fileName = file.getPath().getName();
+ try {
+ maxSeqId = Math.max(maxSeqId, Long
+ .parseLong(fileName.substring(0, fileName.length() - SEQUENCE_ID_FILE_SUFFIX_LENGTH)));
+ } catch (NumberFormatException ex) {
+ LOG.warn("Invalid SeqId File Name={}", fileName);
+ }
}
- newSeqId += safetyBumper; // bump up SeqId
+ return maxSeqId;
+ }
+
+ /**
+ * Get the max sequence id which is stored in the region directory. -1 if none.
+ */
+ public static long getMaxRegionSequenceId(FileSystem fs, Path regionDir) throws IOException {
+ return getMaxSequenceId(getSequenceIdFiles(fs, regionDir));
+ }
+ /**
+ * Create a file with name as region's max sequence id
+ */
+ public static void writeRegionSequenceIdFile(FileSystem fs, Path regionDir, long newMaxSeqId)
+ throws IOException {
+ FileStatus[] files = getSequenceIdFiles(fs, regionDir);
+ long maxSeqId = getMaxSequenceId(files);
+ if (maxSeqId > newMaxSeqId) {
+ throw new IOException("The new max sequence id " + newMaxSeqId +
+ " is less than the old max sequence id " + maxSeqId);
+ }
// write a new seqId file
- Path newSeqIdFile = new Path(editsdir, newSeqId + SEQUENCE_ID_FILE_SUFFIX);
- if (newSeqId != maxSeqId) {
+ Path newSeqIdFile = new Path(WALSplitter.getRegionDirRecoveredEditsDir(regionDir),
+ newMaxSeqId + SEQUENCE_ID_FILE_SUFFIX);
+ if (newMaxSeqId != maxSeqId) {
try {
if (!fs.createNewFile(newSeqIdFile) && !fs.exists(newSeqIdFile)) {
throw new IOException("Failed to create SeqId file:" + newSeqIdFile);
}
- LOG.debug("Wrote file={}, newSeqId={}, maxSeqId={}", newSeqIdFile,
- newSeqId, maxSeqId);
+ LOG.debug("Wrote file={}, newMaxSeqId={}, maxSeqId={}", newSeqIdFile, newMaxSeqId,
+ maxSeqId);
} catch (FileAlreadyExistsException ignored) {
// latest hdfs throws this exception. it's all right if newSeqIdFile already exists
}
}
// remove old ones
- if (files != null) {
- for (FileStatus status : files) {
- if (!newSeqIdFile.equals(status.getPath())) {
- fs.delete(status.getPath(), false);
- }
+ for (FileStatus status : files) {
+ if (!newSeqIdFile.equals(status.getPath())) {
+ fs.delete(status.getPath(), false);
}
}
- return newSeqId;
}
/**
@@ -1817,9 +1816,7 @@ public class WALSplitter {
* @throws IOException
*/
public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells,
- Pair<WALKey, WALEdit> logEntry, Durability durability)
- throws IOException {
-
+ Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
if (entry == null) {
// return an empty array
return Collections.emptyList();
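getMaxSequenceId() above recovers the id from the marker file names themselves. A standalone sketch of that parsing, assuming a suffix of the form ".seqid" (the real suffix constants are private to WALSplitter, so treat the literal here as illustrative):

public class SeqIdFileNameParse {
  public static void main(String[] args) {
    String suffix = ".seqid"; // assumed form; see SEQUENCE_ID_FILE_SUFFIX in WALSplitter
    String[] names = { "1000" + suffix, "2000" + suffix, "bogus" + suffix };
    long maxSeqId = -1L;      // -1 means no valid sequence id file was found
    for (String name : names) {
      try {
        maxSeqId = Math.max(maxSeqId,
          Long.parseLong(name.substring(0, name.length() - suffix.length())));
      } catch (NumberFormatException ex) {
        System.out.println("Invalid SeqId File Name=" + name); // mirrors the WARN above
      }
    }
    System.out.println(maxSeqId); // 2000
  }
}

writeRegionSequenceIdFile() then refuses to move backwards, throwing an IOException when the new id is below the stored maximum, which is what keeps sequence ids monotonic across region lifecycle events.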
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java
new file mode 100644
index 0000000..e657d9c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Testcase for HBASE-20066
+ */
+@Category({ RegionServerTests.class, LargeTests.class })
+public class TestSequenceIdMonotonicallyIncreasing {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSequenceIdMonotonicallyIncreasing.class);
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static final TableName NAME = TableName.valueOf("test");
+
+ private static final byte[] CF = Bytes.toBytes("cf");
+
+ private static final byte[] CQ = Bytes.toBytes("cq");
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ UTIL.startMiniCluster(2);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ UTIL.shutdownMiniCluster();
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ Admin admin = UTIL.getAdmin();
+ if (admin.tableExists(NAME)) {
+ admin.disableTable(NAME);
+ admin.deleteTable(NAME);
+ }
+ }
+
+ private Table createTable(boolean multiRegions) throws IOException {
+ if (multiRegions) {
+ return UTIL.createTable(NAME, CF, new byte[][] { Bytes.toBytes(1) });
+ } else {
+ return UTIL.createTable(NAME, CF);
+ }
+ }
+
+ private long getMaxSeqId(HRegionServer rs, RegionInfo region) throws IOException {
+ Path walFile = ((AbstractFSWAL<?>) rs.getWAL(null)).getCurrentFileName();
+ long maxSeqId = -1L;
+ try (WAL.Reader reader =
+ WALFactory.createReader(UTIL.getTestFileSystem(), walFile, UTIL.getConfiguration())) {
+ for (;;) {
+ WAL.Entry entry = reader.next();
+ if (entry == null) {
+ break;
+ }
+ if (Bytes.equals(region.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName())) {
+ maxSeqId = Math.max(maxSeqId, entry.getKey().getSequenceId());
+ }
+ }
+ }
+ return maxSeqId;
+ }
+
+ @Test
+ public void testSplit()
+ throws IOException, InterruptedException, ExecutionException, TimeoutException {
+ try (Table table = createTable(false)) {
+ table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0)));
+ table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(0)));
+ }
+ UTIL.flush(NAME);
+ HRegionServer rs = UTIL.getRSForFirstRegionInTable(NAME);
+ RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo();
+ UTIL.getAdmin().splitRegionAsync(region.getRegionName(), Bytes.toBytes(1)).get(1,
+ TimeUnit.MINUTES);
+ long maxSeqId = getMaxSeqId(rs, region);
+ RegionLocator locator = UTIL.getConnection().getRegionLocator(NAME);
+ HRegionLocation locA = locator.getRegionLocation(Bytes.toBytes(0), true);
+ HRegionLocation locB = locator.getRegionLocation(Bytes.toBytes(1), true);
+ assertEquals(maxSeqId + 1, locA.getSeqNum());
+ assertEquals(maxSeqId + 1, locB.getSeqNum());
+ }
+
+ @Test
+ public void testMerge()
+ throws IOException, InterruptedException, ExecutionException, TimeoutException {
+ try (Table table = createTable(true)) {
+ table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0)));
+ table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(0)));
+ table.put(new Put(Bytes.toBytes(2)).addColumn(CF, CQ, Bytes.toBytes(0)));
+ }
+ UTIL.flush(NAME);
+ MiniHBaseCluster cluster = UTIL.getMiniHBaseCluster();
+ List<HRegion> regions = cluster.getRegions(NAME);
+ HRegion regionA = regions.get(0);
+ HRegion regionB = regions.get(1);
+ HRegionServer rsA =
+ cluster.getRegionServer(cluster.getServerWith(regionA.getRegionInfo().getRegionName()));
+ HRegionServer rsB =
+ cluster.getRegionServer(cluster.getServerWith(regionB.getRegionInfo().getRegionName()));
+ UTIL.getAdmin().mergeRegionsAsync(regionA.getRegionInfo().getRegionName(),
+ regionB.getRegionInfo().getRegionName(), false).get(1, TimeUnit.MINUTES);
+ long maxSeqIdA = getMaxSeqId(rsA, regionA.getRegionInfo());
+ long maxSeqIdB = getMaxSeqId(rsB, regionB.getRegionInfo());
+ HRegionLocation loc =
+ UTIL.getConnection().getRegionLocator(NAME).getRegionLocation(Bytes.toBytes(0), true);
+ assertEquals(Math.max(maxSeqIdA, maxSeqIdB) + 1, loc.getSeqNum());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
index 1f9fc5d..3e2f113 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
@@ -478,42 +478,6 @@ public abstract class AbstractTestDLS {
}
}
- @Test
- public void testReadWriteSeqIdFiles() throws Exception {
- LOG.info("testReadWriteSeqIdFiles");
- startCluster(2);
- final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null);
- Table ht = installTable(zkw, 10);
- try {
- FileSystem fs = master.getMasterFileSystem().getFileSystem();
- Path tableDir =
- FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf(name.getMethodName()));
- List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
- long newSeqId = WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 1L, 1000L);
- WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 1L, 1000L);
- assertEquals(newSeqId + 2000,
- WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 3L, 1000L));
-
- Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regionDirs.get(0));
- FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
- @Override
- public boolean accept(Path p) {
- return WALSplitter.isSequenceIdFile(p);
- }
- });
- // only one seqid file should exist
- assertEquals(1, files.length);
-
- // verify all seqId files aren't treated as recovered.edits files
- NavigableSet<Path> recoveredEdits =
- WALSplitter.getSplitEditFilesSorted(fs, regionDirs.get(0));
- assertEquals(0, recoveredEdits.size());
- } finally {
- if (ht != null) ht.close();
- if (zkw != null) zkw.close();
- }
- }
-
private Table installTable(ZKWatcher zkw, int nrs) throws Exception {
return installTable(zkw, nrs, 0);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/f06a89b5/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestReadWriteSeqIdFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestReadWriteSeqIdFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestReadWriteSeqIdFiles.java
new file mode 100644
index 0000000..6e3aa10
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestReadWriteSeqIdFiles.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.wal;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.NavigableSet;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ RegionServerTests.class, SmallTests.class })
+public class TestReadWriteSeqIdFiles {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestReadWriteSeqIdFiles.class);
+
+ private static final Logger LOG = LoggerFactory.getLogger(TestReadWriteSeqIdFiles.class);
+
+ private static final HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility();
+
+ private static FileSystem FS;
+
+ private static Path REGION_DIR;
+
+ @BeforeClass
+ public static void setUp() throws IOException {
+ FS = FileSystem.getLocal(UTIL.getConfiguration());
+ REGION_DIR = UTIL.getDataTestDir();
+ }
+
+ @AfterClass
+ public static void tearDown() throws IOException {
+ UTIL.cleanupTestDir();
+ }
+
+ @Test
+ public void test() throws IOException {
+ WALSplitter.writeRegionSequenceIdFile(FS, REGION_DIR, 1000L);
+ assertEquals(1000L, WALSplitter.getMaxRegionSequenceId(FS, REGION_DIR));
+ WALSplitter.writeRegionSequenceIdFile(FS, REGION_DIR, 2000L);
+ assertEquals(2000L, WALSplitter.getMaxRegionSequenceId(FS, REGION_DIR));
+ // can not write a sequence id which is smaller
+ try {
+ WALSplitter.writeRegionSequenceIdFile(FS, REGION_DIR, 1500L);
+ fail("Writing a smaller sequence id should have thrown an IOException");
+ } catch (IOException e) {
+ // expected
+ LOG.info("Expected error", e);
+ }
+
+ Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(REGION_DIR);
+ FileStatus[] files = FSUtils.listStatus(FS, editsdir, new PathFilter() {
+ @Override
+ public boolean accept(Path p) {
+ return WALSplitter.isSequenceIdFile(p);
+ }
+ });
+ // only one seqid file should exist
+ assertEquals(1, files.length);
+
+ // verify all seqId files aren't treated as recovered.edits files
+ NavigableSet<Path> recoveredEdits = WALSplitter.getSplitEditFilesSorted(FS, REGION_DIR);
+ assertEquals(0, recoveredEdits.size());
+ }
+}
[18/20] hbase git commit: HBASE-18467 report nightly results to devs via jira
Posted by bu...@apache.org.
HBASE-18467 report nightly results to devs via jira
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/105de8d2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/105de8d2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/105de8d2
Branch: refs/heads/HBASE-15151
Commit: 105de8d275418892e0baa7f7ab68f84f6d167d98
Parents: 0fa5d69
Author: Sean Busbey <bu...@apache.org>
Authored: Wed Aug 9 00:48:46 2017 -0500
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Feb 27 11:28:24 2018 -0600
----------------------------------------------------------------------
dev-support/Jenkinsfile | 165 +++++++++++++++++++++++++++++++++++++++----
1 file changed, 150 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/105de8d2/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 201783b..31416dd 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -34,6 +34,12 @@ pipeline {
// where we check out to across stages
BASEDIR = "${env.WORKSPACE}/component"
YETUS_RELEASE = '0.7.0'
+ // where we'll write everything from different steps. Need a copy here so the final step can check for success/failure.
+ OUTPUT_DIR_RELATIVE_GENERAL = 'output-general'
+ OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7'
+ OUTPUT_DIR_RELATIVE_HADOOP2 = 'output-jdk8-hadoop2'
+ OUTPUT_DIR_RELATIVE_HADOOP3 = 'output-jdk8-hadoop3'
+
PROJECT = 'hbase'
PROJECT_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
// This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag.
@@ -119,8 +125,8 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
// on branches that don't support jdk7, this will already be JAVA_HOME, so we'll end up not
// doing multijdk there.
MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
- OUTPUT_DIR_RELATIVE = "output-general"
- OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
+ OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+ OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}"
}
steps {
unstash 'yetus'
@@ -130,7 +136,18 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
"${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
'''
// TODO should this be a download from master, similar to how the personality is?
- sh "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
+ sh '''#!/usr/bin/env bash
+ declare commentfile
+ rm -rf "${OUTPUT_DIR}/success" "${OUTPUT_DIR}/failure"
+ if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+ commentfile="${OUTPUT_DIR}/success"
+ echo '(/) {color:green}+1 general checks{color}' >> "${commentfile}"
+ else
+ commentfile="${OUTPUT_DIR}/failure"
+ echo '(x) {color:red}-1 general checks{color}' >> "${commentfile}"
+ fi
+ echo "-- For more information [see general report|${BUILD_URL}/General_Nightly_Build_Report/]" >> "${commentfile}"
+ '''
}
post {
always {
@@ -155,8 +172,8 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
}
environment {
TESTS = 'mvninstall,compile,javac,unit,htmlout'
- OUTPUT_DIR_RELATIVE = "output-jdk7"
- OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
+ OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_JDK7}"
+ OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_JDK7}"
// On branches where we do jdk7 checks, jdk7 will be JAVA_HOME already.
}
steps {
@@ -166,13 +183,22 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
"${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
'''
- sh """#!/usr/bin/env bash
+ sh '''#!/usr/bin/env bash
# for branch-1.1 we don't do jdk8 findbugs, so do it here
- if [ "${env.BRANCH_NAME}" == "branch-1.1" ]; then
+ if [ "${BRANCH_NAME}" == "branch-1.1" ]; then
TESTS+=",findbugs"
fi
- "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
- """
+ declare commentfile
+ rm -rf "${OUTPUT_DIR}/success" "${OUTPUT_DIR}/failure"
+ if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+ commentfile="${OUTPUT_DIR}/success"
+ echo '(/) {color:green}+1 jdk7 checks{color}' >> "${commentfile}"
+ else
+ commentfile="${OUTPUT_DIR}/failure"
+ echo '(x) {color:red}-1 jdk7 checks{color}' >> "${commentfile}"
+ fi
+ echo "-- For more information [see jdk7 report|${BUILD_URL}/JDK7_Nightly_Build_Report/]" >> "${commentfile}"
+ '''
}
post {
always {
@@ -214,8 +240,8 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
}
environment {
TESTS = 'mvninstall,compile,javac,unit,htmlout'
- OUTPUT_DIR_RELATIVE = "output-jdk8-hadoop2"
- OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
+ OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
+ OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
// This isn't strictly needed on branches that only support jdk8, but doesn't hurt
// and is needed on branches that do both jdk7 and jdk8
SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
@@ -227,7 +253,18 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
"${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
'''
- sh "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
+ sh '''#!/usr/bin/env bash
+ declare commentfile
+ rm -rf "${OUTPUT_DIR}/success" "${OUTPUT_DIR}/failure"
+ if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+ commentfile="${OUTPUT_DIR}/success"
+ echo '(/) {color:green}+1 jdk8 hadoop2 checks{color}' >> "${commentfile}"
+ else
+ commentfile="${OUTPUT_DIR}/failure"
+ echo '(x) {color:red}-1 jdk8 hadoop2 checks{color}' >> "${commentfile}"
+ fi
+ echo "-- For more information [see jdk8 (hadoop2) report|${BUILD_URL}/JDK8_Nightly_Build_Report_(Hadoop2)/]" >> "${commentfile}"
+ '''
}
post {
always {
@@ -272,8 +309,8 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
// Findbugs is part of this last yetus stage to prevent findbugs precluding hadoop3
// tests.
TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
- OUTPUT_DIR_RELATIVE = "output-jdk8-hadoop3"
- OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
+ OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
+ OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
// This isn't strictly needed on branches that only support jdk8, but doesn't hurt
// and is needed on branches that do both jdk7 and jdk8
SET_JAVA_HOME = '/usr/lib/jvm/java-8-openjdk-amd64'
@@ -287,7 +324,18 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
"${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
'''
- sh "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
+ sh '''#!/usr/bin/env bash
+ declare commentfile
+ rm -rf "${OUTPUT_DIR}/success" "${OUTPUT_DIR}/failure"
+ if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
+ commentfile="${OUTPUT_DIR}/success"
+ echo '(/) {color:green}+1 jdk8 hadoop3 checks{color}' >> "${commentfile}"
+ else
+ commentfile="${OUTPUT_DIR}/failure"
+ echo '(x) {color:red}-1 jdk8 hadoop3 checks{color}' >> "${commentfile}"
+ fi
+ echo "-- For more information [see jdk8 (hadoop3) report|${BUILD_URL}/JDK8_Nightly_Build_Report_(Hadoop3)/]" >> "${commentfile}"
+ '''
}
post {
always {
@@ -337,6 +385,7 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
rm -rf "unpacked_src_tarball" && mkdir "unpacked_src_tarball"
rm -rf ".m2-for-repo" && mkdir ".m2-for-repo"
rm -rf ".m2-for-src" && mkdir ".m2-for-src"
+ rm -rf "src_tarball_success" "src_tarball_failure"
'''
sh '''#!/usr/bin/env bash
rm -rf "output-srctarball/machine" && mkdir "output-srctarball/machine"
@@ -356,7 +405,93 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
always {
archive 'output-srctarball/*'
}
+ // This approach only works because the source release artifact is the last stage that does work.
+ success {
+ writeFile file: "${env.WORKSPACE}/src_tarball_success", text: '(/) {color:green}+1 source release artifact{color}\n-- See build output for details.'
+ }
+ failure {
+ writeFile file: "${env.WORKSPACE}/src_tarball_failure", text: '(x) {color:red}-1 source release artifact{color}\n-- See build output for details.'
+ }
+ }
+ }
+ stage ('Fail if previous stages failed') {
+ steps {
+ script {
+ def failures = ['src_tarball_failure', "${env.OUTPUT_DIR_RELATIVE_GENERAL}/failure",
+ "${env.OUTPUT_DIR_RELATIVE_JDK7}/failure", "${OUTPUT_DIR_RELATIVE_HADOOP2}/failure",
+ "${env.OUTPUT_DIR_RELATIVE_HADOOP3}/failure"]
+ for ( failure_file in failures ) {
+ if (fileExists(file: failure_file)) {
+ error 'Failing job due to failure(s) in prior steps.'
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ always {
+ script {
+ try {
+ sh "printenv"
+ def results = ["${env.OUTPUT_DIR_RELATIVE_GENERAL}/failure", "${env.OUTPUT_DIR_RELATIVE_GENERAL}/success",
+ "${env.OUTPUT_DIR_RELATIVE_JDK7}/failure", "${env.OUTPUT_DIR_RELATIVE_JDK7}/success",
+ "${env.OUTPUT_DIR_RELATIVE_HADOOP2}/failure", "${env.OUTPUT_DIR_RELATIVE_HADOOP2}/success",
+ "${env.OUTPUT_DIR_RELATIVE_HADOOP3}/failure", "${env.OUTPUT_DIR_RELATIVE_HADOOP3}/success",
+ 'src_tarball_failure', 'src_tarball_success']
+ echo env.BRANCH_NAME
+ echo env.BUILD_URL
+ echo currentBuild.result
+ echo currentBuild.durationString
+ def comment = "Results for branch ${env.BRANCH_NAME}, done in ${currentBuild.durationString}\n"
+ comment += "\t[build ${currentBuild.displayName} on builds.a.o|${env.BUILD_URL}]: ${currentBuild.result}\n----\ndetails (if available):\n\n"
+ if (currentBuild.result == "SUCCESS") {
+ comment += "(/) *{color:green}+1 overall{color}*\n\n"
+ } else {
+ comment += "(x) *{color:red}-1 overall{color}*\n"
+ // Ideally get the committer out of the change and @ mention them in the per-jira comment
+ comment += " Committer, please check your recent inclusion of a patch for this issue.\n\n"
+ }
+ echo ""
+ echo "[DEBUG] trying to aggregate step-wise results"
+ comment += results.collect { fileExists(file: it) ? readFile(file: it) : "" }.join("\n\n")
+ echo "[INFO] Comment:"
+ echo comment
+ echo ""
+ echo "[INFO] There are ${currentBuild.changeSets.size()} change sets."
+ getJirasToComment(currentBuild).each { currentIssue ->
+ jiraComment issueKey: currentIssue, body: comment
+ }
+ } catch (Exception exception) {
+ echo "Got exception: ${exception}"
+ echo " ${exception.getStackTrace()}"
+ }
+ }
+ }
+ }
+}
+import org.jenkinsci.plugins.workflow.support.steps.build.RunWrapper
+@NonCPS
+List<String> getJirasToComment(RunWrapper thisBuild) {
+ def seenJiras = []
+ thisBuild.changeSets.each { cs ->
+ cs.getItems().each { change ->
+ CharSequence msg = change.msg
+ echo "change: ${change}"
+ echo " ${msg}"
+ echo " ${change.commitId}"
+ echo " ${change.author}"
+ echo ""
+ msg.eachMatch("HBASE-[0-9]+") { currentIssue ->
+ echo "[DEBUG] found jira key: ${currentIssue}"
+ if (currentIssue in seenJiras) {
+ echo "[DEBUG] already commented on ${currentIssue}."
+ } else {
+ echo "[INFO] commenting on ${currentIssue}."
+ seenJiras << currentIssue
+ }
}
}
}
+ return seenJiras
}
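The @NonCPS getJirasToComment() helper above boils down to a de-duplicating regex scan over the build's commit messages. A standalone Java sketch of the same scan, with hypothetical messages:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class JiraKeyScan {
  public static void main(String[] args) {
    List<String> messages = Arrays.asList(
      "HBASE-18467 report nightly results to devs via jira",
      "HBASE-18467 temporarily mute posting to jira",
      "HBASE-20082 Fix findbugs errors only on master");
    Pattern key = Pattern.compile("HBASE-[0-9]+");
    List<String> seenJiras = new ArrayList<>();
    for (String msg : messages) {
      Matcher m = key.matcher(msg);
      while (m.find()) {
        if (!seenJiras.contains(m.group())) { // comment once per issue, however many commits mention it
          seenJiras.add(m.group());
        }
      }
    }
    System.out.println(seenJiras); // [HBASE-18467, HBASE-20082]
  }
}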
[03/20] hbase git commit: HBASE-20082 Fix findbugs errors only on master which are introduced by HBASE-19397
Posted by bu...@apache.org.
HBASE-20082 Fix findbugs errors only on master which are introduced by HBASE-19397
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8c74d177
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8c74d177
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8c74d177
Branch: refs/heads/HBASE-15151
Commit: 8c74d177f68bbd5412cef96dc33f16ba33ff7875
Parents: a34f129
Author: zhangduo <zh...@apache.org>
Authored: Mon Feb 26 10:22:09 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Mon Feb 26 15:50:51 2018 +0800
----------------------------------------------------------------------
.../hadoop/hbase/master/replication/RefreshPeerProcedure.java | 2 ++
.../hbase/replication/regionserver/ReplicationSourceManager.java | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/8c74d177/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
index 1253ef9..ba9bcdc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
@@ -50,6 +50,8 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
private PeerOperationType type;
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC",
+ justification = "Will never change after construction")
private ServerName targetServer;
private boolean dispatched;
http://git-wip-us.apache.org/repos/asf/hbase/blob/8c74d177/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 4e1b20d..ebb8042 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -403,7 +403,7 @@ public class ReplicationSourceManager implements ReplicationListener {
toStartup.add(replicationSource);
}
}
- for (ReplicationSourceInterface replicationSource : oldsources) {
+ for (ReplicationSourceInterface replicationSource : toStartup) {
replicationSource.startup();
}
}
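The one-line fix above matters because oldsources holds the sources being torn down while toStartup holds their freshly created replacements; iterating oldsources started the wrong set. A reduced sketch of the corrected pattern, with Source standing in for the real ReplicationSourceInterface:

import java.util.ArrayList;
import java.util.List;

public class StartOnlyNewSources {
  interface Source { void startup(); }

  static void refreshSources(List<Source> oldSources, List<Source> replacements) {
    List<Source> toStartup = new ArrayList<>();
    for (Source replacement : replacements) {
      toStartup.add(replacement); // collect the sources we actually created...
    }
    for (Source source : toStartup) {
      source.startup();           // ...and start exactly those, never oldSources
    }
  }
}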
[15/20] hbase git commit: Revert "for creating patch HBASE-20074-V01.patch"
Posted by bu...@apache.org.
Revert "for creating patch HBASE-20074-V01.patch"
This reverts commit 73028d5bd9f85655b284654579ddcbbca31e41e8.
bad commit message
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e4ce38df
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e4ce38df
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e4ce38df
Branch: refs/heads/HBASE-15151
Commit: e4ce38df326e750015f6077b8ca8de9788cce4d4
Parents: 3443aa9
Author: Sean Busbey <bu...@apache.org>
Authored: Tue Feb 27 11:20:54 2018 -0600
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Feb 27 11:20:54 2018 -0600
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/CompactingMemStore.java | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/e4ce38df/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index d60b049..44b40eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -136,10 +136,14 @@ public class CompactingMemStore extends AbstractMemStore {
numStores = 1;
}
inmemoryFlushSize = memstoreFlushSize / numStores;
- // multiply by a factor (the same factor for all index types)
- factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
+ // multiply by a factor (different factors for different index types)
+ if (indexType == IndexType.ARRAY_MAP) {
+ factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
-
+ } else {
+ factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
+ IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
+ }
inmemoryFlushSize = (long) (inmemoryFlushSize * factor);
LOG.info("Setting in-memory flush size threshold to {} and immutable segments index to type={}",
StringUtils.byteDesc(inmemoryFlushSize), indexType);
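For context on the restored threshold computation: the per-store in-memory flush size is the region's flush size split across its stores, scaled by a factor. A worked example with hypothetical numbers (the real default comes from IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT, which this hunk does not show); note that both branches of the restored if/else currently read the same default, so the index-type distinction is a placeholder.

public class InMemoryFlushSizeExample {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024; // 128 MB region flush size, hypothetical
    int numStores = 4;
    double factor = 0.1;                         // hypothetical factor
    long inmemoryFlushSize = (long) ((memstoreFlushSize / numStores) * factor);
    System.out.println(inmemoryFlushSize);       // 3355443 bytes, roughly 3.2 MB per store
  }
}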
[17/20] hbase git commit: HBASE-18467 temporarily mute posting to jira
Posted by bu...@apache.org.
HBASE-18467 temporarily mute posting to jira
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dd7c231a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dd7c231a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dd7c231a
Branch: refs/heads/HBASE-15151
Commit: dd7c231ab27caab395d3a66f9a71673ea1eb1d1d
Parents: 105de8d
Author: Sean Busbey <bu...@apache.org>
Authored: Sun Feb 25 00:59:52 2018 -0600
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Feb 27 11:28:24 2018 -0600
----------------------------------------------------------------------
dev-support/Jenkinsfile | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/dd7c231a/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 31416dd..22f3f21 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -460,7 +460,8 @@ curl -L -o personality.sh "${env.PROJECT_PERSONALITY}"
echo ""
echo "[INFO] There are ${currentBuild.changeSets.size()} change sets."
getJirasToComment(currentBuild).each { currentIssue ->
- jiraComment issueKey: currentIssue, body: comment
+// jiraComment issueKey: currentIssue, body: comment
+ echo "jiraComment issueKey: ${currentIssue}, body: ${comment}"
}
} catch (Exception exception) {
echo "Got exception: ${exception}"