Posted to commits@hbase.apache.org by zg...@apache.org on 2020/08/10 01:26:01 UTC

[hbase] branch HBASE-24666 updated (fae9f0c -> 9c61d14)

This is an automated email from the ASF dual-hosted git repository.

zghao pushed a change to branch HBASE-24666
in repository https://gitbox.apache.org/repos/asf/hbase.git.


    omit fae9f0c  HBASE-24681 Remove the cache walsById/walsByIdRecoveredQueues from ReplicationSourceManager (#2019)
    omit bb9cae1  HBASE-24682 Refactor ReplicationSource#addHFileRefs method: move it to ReplicationSourceManager (#2020)
     add 430602a  HBASE-24740 Enable journal logging for HBase snapshot operation (#2104)
     add f2c087c  Revert "HBASE-24718 : Generic NamedQueue framework for multiple use-cases (Refactor SlowLog responses)"
     add ea96743  HBASE-24710 Incorrect checksum calculation in saveVersion.sh (#2056)
     add fccd03f  HBASE-24555 Correct the description of hbase.hregion.max.filesize (#1895)
     add 3d270ba  HBASE-24696 Include JVM information on Web UI under "Software Attributes"
     add 32796ad  Revert "HBASE-24743 Reject to add a peer which replicate to itself earlier (#2071)"
     add f35c5ea  HBASE-24718 : Generic NamedQueue framework for multiple use-cases (Refactor SlowLog responses) (#2109)
     add a6e3db5  HBASE-24662 Update DumpClusterStatusAction to notice changes in region server count
     add 8191fbd  HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables
     add e7963f6  HBASE-24763 Remove 2.1 Documentation direct link from website (#2129)
     add 32c7012  HBASE-24743 Reject to add a peer which replicate to itself earlier (#2122)
     add 0b85729  HBASE-24762 Purge protobuf java 2.5.0 dependency (#2128)
     add 09e7ccd  HBASE-24757 : ReplicationSink should limit row count in batch mutation based on hbase.rpc.rows.warning.threshold
     add 975cdf7  HBASE-24665 all wal of RegionGroupingProvider together roll (#2021)
     add 8c0d7fa  HBASE-24758 Avoid flooding replication source RSes logs when no sinks… (#2118)
     add 82d0990  HBASE-24777 InfoServer support ipv6 host and port
     add 477debd  HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)
     add 7e6e7a7  HBASE-24770 Reimplement the Constraints API and revisit the IA annotations on related classes (#2140)
     add 7eff07d  HBASE-11686 Shell code should create a binding / irb workspace instead of polluting the root namespace (#2141)
     add 4471a64  HBASE-24738 [Shell] processlist command fails with ERROR: Unexpected end of file from server when SSL enabled (#2123)
     add 7974a1e  HBASE-11676 Scan FORMATTER is not applied for columns using non-printable name in shell (#2161)
     add 5480f35  HBASE-24790 Remove unused counter from SplitLogCounters (#2164)
     add 56f32ea  HBASE-24669 Logging of ppid should be consistent across all occurrences
     add 345b77a  HBASE-24632 Enable procedure-based log splitting as default in hbase3 Add deprecation of 'classic' zk-based WAL splitter. (#2156)
     add 652f1e6  HBASE-24766 Document Remote Procedure Execution (#2131)
     add 5f27a00  HBASE-24752 NPE/500 accessing webui on master startup
     add d65fb87  HBASE-24788: Fix the connection leaks on getting hbase admin from unclosed connection (#2162)
     add f07f30a  HBASE-20226: Parallelize region manifest deletes (#2159)
     add 1b9269d  HBASE-24797 Move log code out of loop
     add 840a557  HBASE-24794 hbase.rowlock.wait.duration should not be <= 0 (#2174)
     add 7c4d66a  HBASE-24803 Unify hbase-shell ::Shell::Commands::Command#help behavior (#2178)
     add a3f623e  HBASE-24695 FSHLog - close the current WAL file in a background thread. (#2168)
     add e22a2d2  HBASE-24680 Refactor the checkAndMutate code on the server side (#2094)
     add fe5571b  HBASE-24572 release scripts should try to use a keyid when refering to GPG keys. (#2001)
     add 3470fee  HBASE-24476 release scripts should provide timing information (#2145)
     add 86ebbdd  HBASE-24805 HBaseTestingUtility.getConnection should be threadsafe
     add 492cf10  HBASE-24704 Make Table Schema easier to view with multiple families
     add 9a1bad8  HBASE-24713 RS startup with FSHLog throws NPE after HBASE-21751 (#2125)
     add 8e33bb0  HBASE-24791 Improve HFileOutputFormat2 to avoid always call getTableRelativePath method (#2167)
     add ad7caf7  HBASE-24795 : RegionMover to deal with unknown region while (un)loading
     add 9ec484a  HBASE-24795 : TestRegionMover2#testWithSplitRegions fix (ADDENDUM)
     add 21a0b8e  HBASE-24767 Change default to false for HBASE-15519 per-user metrics Set hbase.regionserver.user.metrics.enabled default to false; i.e. off. (#2132)
     add 148c185  HBASE-24808 skip empty log cleaner delegate class names (WAS => cleaner.CleanerChore: Can NOT create CleanerDelegate= ClassNotFoundException) (#2181)
     add d2f5a5f  HBAE-24507 Remove HTableDescriptor and HColumnDescriptor (#2186)
     add c39cad2  HBASE-24821 simplify the logic of getRegionInfo in TestFlushFromClient to reduce redundancy code
     add f8b887b  HBASE-24823 Port HBASE-22762 Print the delta between phases in the split/merge/compact/flush transaction journals
     add c645cb5  BackPort HBASE-11554 Remove Reusable poolmap Rpc client type. (#2208)
     add 8003a15  HBASE-21905 (addendum): Address compaction races in TestFIFOCompactionPolicy (#2203)
     add 0ae125a  HBASE-24817 Allow configuring WALEntry filters on ReplicationSource (#2198)
     add f710d2d  HBASE-24659 Calculate FIXED_OVERHEAD automatically (#2018)
     add 9b49bd6  HBASE-24826 Add some comments for processlist in hbase shell (#2207)
     add 485e0d2  HBASE-24694 Support flush a single column family of table (#2179)
     add 0b604d9  HBASE-24750 : All ExecutorService should use guava ThreadFactoryBuilder
     add 6ef90aa  HBASE-24824 Add more stats in PE for read replica (#2205)
     add 124af63  HBASE-24779 Report on the WAL edit buffer usage/limit for replication
     add d492aea  HBASE-21721 FSHLog : reduce write#syncs() times (#2217)
     add 3edfa10  HBASE-24788 : TableOutputFormat source compatibility (ADDENDUM)
     add 11ac0a1  HBASE-24830 Some tests involving RS crash fail with NullPointerException after HBASE-24632 in branch-2
     add 726756a  HBASE-23157 WAL unflushed seqId tracking may wrong when Durability.ASYNC_WAL is used (#762)
     new d45171f  HBASE-24682 Refactor ReplicationSource#addHFileRefs method: move it to ReplicationSourceManager (#2020)
     new 9c61d14  HBASE-24681 Remove the cache walsById/walsByIdRecoveredQueues from ReplicationSourceManager (#2019)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (fae9f0c)
            \
             N -- N -- N   refs/heads/HBASE-24666 (9c61d14)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
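
For readers who want to inspect the rewrite locally, the two tips named above can be compared directly. A minimal sketch, assuming both the old tip (fae9f0c) and the new tip (9c61d14) are still reachable in your clone and that the abbreviated hashes resolve uniquely:

  # find the common base B of the old and new branch tips
  git merge-base fae9f0c 9c61d14

  # commits reachable from the new tip but not from the old one (the rewritten line)
  git log --oneline fae9f0c..9c61d14

  # commits that were on the old tip but are no longer on this branch (the omitted line)
  git log --oneline 9c61d14..fae9f0c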


Summary of changes:
 bin/hirb.rb                                        |  87 +-
 dev-support/create-release/release-build.sh        |   8 +-
 dev-support/create-release/release-util.sh         |  55 +-
 .../exemplars/shaded_client/HelloHBase.java        |   6 +-
 .../LogRollBackupSubprocedurePool.java             |   9 +-
 .../apache/hadoop/hbase/backup/TestBackupBase.java |  15 +-
 .../hadoop/hbase/backup/TestIncrementalBackup.java |  26 +-
 .../backup/TestIncrementalBackupWithFailures.java  |  10 +-
 .../hadoop/hbase/backup/TestRemoteBackup.java      |  21 +-
 hbase-client/pom.xml                               |   4 -
 .../org/apache/hadoop/hbase/HColumnDescriptor.java | 792 ----------------
 .../org/apache/hadoop/hbase/HTableDescriptor.java  | 992 ---------------------
 .../java/org/apache/hadoop/hbase/client/Admin.java |  10 +
 .../hadoop/hbase/client/AdminOverAsyncAdmin.java   |   5 +
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |   8 +
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |   5 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java       |   5 +
 .../apache/hadoop/hbase/client/CheckAndMutate.java |  11 +-
 .../hadoop/hbase/client/ClusterStatusListener.java |   7 +-
 .../client/ColumnFamilyDescriptorBuilder.java      |  13 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  12 +-
 .../hbase/client/TableDescriptorBuilder.java       |  52 +-
 .../apache/hadoop/hbase/ipc/AbstractRpcClient.java |   8 +-
 .../hadoop/hbase/ipc/NettyRpcConnection.java       |   7 +-
 .../apache/hadoop/hbase/quotas/QuotaTableUtil.java |  28 +
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  72 ++
 .../java/org/apache/hadoop/hbase/util/PoolMap.java |  49 +-
 .../apache/hadoop/hbase/TestHColumnDescriptor.java | 148 ---
 .../apache/hadoop/hbase/TestHTableDescriptor.java  | 369 --------
 .../client/TestColumnFamilyDescriptorBuilder.java  |  64 +-
 .../hbase/client/TestTableDescriptorBuilder.java   | 170 ++--
 .../hadoop/hbase/util/TestReusablePoolMap.java     |  90 --
 hbase-common/pom.xml                               |   4 -
 .../java/org/apache/hadoop/hbase/HConstants.java   |  24 +-
 .../apache/hadoop/hbase/io/hfile/HFileContext.java |   8 +-
 .../hadoop/hbase/util/ImmutableByteArray.java      |   1 +
 .../java/org/apache/hadoop/hbase/util/Threads.java |  72 +-
 hbase-common/src/main/resources/hbase-default.xml  |   8 +-
 hbase-common/src/saveVersion.sh                    |  22 +-
 .../hadoop/hbase/HBaseCommonTestingUtility.java    |   2 +-
 .../coprocessor/TestBatchCoprocessorEndpoint.java  |   9 +-
 .../hbase/coprocessor/TestCoprocessorEndpoint.java |   9 +-
 .../coprocessor/TestCoprocessorTableEndpoint.java  |  32 +-
 hbase-examples/pom.xml                             |   1 +
 .../hbase/client/example/AsyncClientExample.java   |   4 +-
 .../regionserver/MetricsRegionServerSource.java    |   7 +
 .../MetricsRegionServerSourceImpl.java             |   7 +
 .../MetricsReplicationGlobalSourceSource.java      | 248 +-----
 ... MetricsReplicationGlobalSourceSourceImpl.java} |  19 +-
 .../MetricsReplicationSourceFactory.java           |   2 +-
 .../MetricsReplicationSourceFactoryImpl.java       |   4 +-
 .../hadoop/hbase/hbtop/field/FieldValue.java       |   5 +-
 .../org/apache/hadoop/hbase/http/InfoServer.java   |   6 +-
 .../IntegrationTestIngestStripeCompactions.java    |  16 +-
 .../hadoop/hbase/IntegrationTestIngestWithMOB.java |   8 +-
 .../hadoop/hbase/IntegrationTestLazyCfLoading.java |  13 +-
 .../hadoop/hbase/IntegrationTestMobCompaction.java |  20 +-
 .../StripeCompactionsPerformanceEvaluation.java    |  38 +-
 .../chaos/actions/DumpClusterStatusAction.java     |  62 +-
 .../chaos/policies/TwoConcurrentActionPolicy.java  |   4 +-
 .../hbase/test/IntegrationTestBigLinkedList.java   |  42 +-
 ...IntegrationTestBigLinkedListWithVisibility.java |  12 +-
 .../hbase/test/IntegrationTestLoadAndVerify.java   |  14 +-
 ...grationTestWithCellVisibilityLoadAndVerify.java |   6 +-
 .../hadoop/hbase/mapred/TableRecordReader.java     |   2 +-
 .../hadoop/hbase/mapred/TableRecordReaderImpl.java |   6 +-
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  49 +-
 .../apache/hadoop/hbase/mapreduce/ImportTsv.java   |  25 +-
 .../hadoop/hbase/mapreduce/TableOutputFormat.java  |  16 +-
 .../hbase/mapreduce/TableRecordReaderImpl.java     |   6 +-
 .../apache/hadoop/hbase/PerformanceEvaluation.java | 156 +++-
 .../hbase/mapreduce/TestTimeRangeMapRed.java       |  12 +-
 .../hadoop/hbase/procedure2/ProcedureExecutor.java |   3 +-
 .../procedure2/RemoteProcedureDispatcher.java      |   5 +-
 .../apache/hadoop/hbase/rest/SchemaResource.java   |   5 +-
 .../hadoop/hbase/rest/model/ColumnSchemaModel.java |  42 +-
 .../hadoop/hbase/rest/model/TableSchemaModel.java  |  30 +-
 .../src/main/resources/hbase-webapps/rest/rest.jsp |   6 +
 .../hadoop/hbase/rest/PerformanceEvaluation.java   |  20 +-
 .../hadoop/hbase/rest/TestScannersWithFilters.java |  10 +-
 .../hbase/rest/client/TestRemoteAdminRetries.java  |  10 +-
 .../hadoop/hbase/rest/client/TestRemoteTable.java  |  16 +-
 hbase-server/pom.xml                               |   4 -
 .../hbase/tmpl/master/MasterStatusTmpl.jamon       |  13 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon     |   6 +
 .../hadoop/hbase/tmpl/tool/CanaryStatusTmpl.jamon  |   6 +
 .../org/apache/hadoop/hbase/LocalHBaseCluster.java |  31 +-
 .../org/apache/hadoop/hbase/SplitLogCounters.java  |   8 +-
 .../java/org/apache/hadoop/hbase/SplitLogTask.java |   3 +
 .../apache/hadoop/hbase/constraint/Constraint.java |  78 +-
 .../hadoop/hbase/constraint/Constraints.java       | 605 +++++--------
 .../hadoop/hbase/constraint/package-info.java      | 465 +++++-----
 .../coordination/SplitLogManagerCoordination.java  |   3 +
 .../coordination/SplitLogWorkerCoordination.java   |  10 +-
 .../coordination/ZkCoordinatedStateManager.java    |   3 +
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 189 ++++
 .../hadoop/hbase/io/hfile/BlockCacheKey.java       |  11 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |   9 +-
 .../hadoop/hbase/io/hfile/LruBlockCache.java       | 107 +--
 .../apache/hadoop/hbase/ipc/FifoRpcScheduler.java  |  14 +-
 .../hadoop/hbase/ipc/MasterFifoRpcScheduler.java   |  17 +-
 .../hbase/master/ClusterStatusPublisher.java       |   7 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |   8 +-
 .../hadoop/hbase/master/MasterRpcServices.java     |   7 +
 .../hadoop/hbase/master/MasterWalManager.java      |  34 +-
 .../hbase/master/MetricsMasterWrapperImpl.java     |   3 +
 .../hadoop/hbase/master/SplitLogManager.java       |   3 +
 .../hadoop/hbase/master/SplitWALManager.java       |  41 +-
 .../hbase/master/assignment/AssignmentManager.java |   3 +-
 .../assignment/SplitTableRegionProcedure.java      |   4 +-
 .../hadoop/hbase/master/cleaner/CleanerChore.java  |   6 +-
 .../hadoop/hbase/master/cleaner/DirScanPool.java   |   4 +-
 .../master/procedure/ServerCrashProcedure.java     |  55 +-
 .../master/procedure/ServerRemoteProcedure.java    |  45 +-
 .../master/procedure/SplitWALRemoteProcedure.java  |   2 +-
 .../hbase/master/snapshot/SnapshotManager.java     |  18 +-
 .../hbase/master/snapshot/TakeSnapshotHandler.java |  10 +-
 .../hadoop/hbase/monitoring/MonitoredTaskImpl.java |  21 +-
 .../hbase/namequeues/NamedQueueRecorder.java       |   5 +-
 .../hbase/procedure/ProcedureCoordinator.java      |   7 +-
 .../hadoop/hbase/procedure/ProcedureMember.java    |   7 +-
 .../procedure/flush/FlushTableSubprocedure.java    |  25 +-
 .../flush/MasterFlushTableProcedureManager.java    |  12 +-
 .../RegionServerFlushTableProcedureManager.java    |  22 +-
 .../org/apache/hadoop/hbase/quotas/QuotaUtil.java  |  35 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 156 ++--
 .../apache/hadoop/hbase/regionserver/HStore.java   |   4 +-
 .../hadoop/hbase/regionserver/LogRoller.java       |   4 +-
 .../hadoop/hbase/regionserver/MemStoreFlusher.java |   6 +-
 .../hbase/regionserver/MetricsRegionServer.java    |   4 +
 .../regionserver/MetricsUserAggregateFactory.java  |   2 +-
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 440 ++++-----
 .../apache/hadoop/hbase/regionserver/Region.java   |  44 +
 .../hbase/regionserver/RegionCoprocessorHost.java  | 311 +------
 .../hadoop/hbase/regionserver/SplitLogWorker.java  |  11 +-
 .../regionserver/handler/WALSplitterHandler.java   |   5 +-
 .../snapshot/RegionServerSnapshotManager.java      |   3 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java      |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java      |  84 +-
 .../regionserver/wal/SequenceIdAccounting.java     |  29 +-
 .../replication/HBaseReplicationEndpoint.java      |   4 +-
 .../replication/SystemTableWALEntryFilter.java     |  12 +-
 .../hadoop/hbase/replication/WALEntryFilter.java   |   8 +-
 .../HBaseInterClusterReplicationEndpoint.java      |  12 +-
 .../replication/regionserver/MetricsSource.java    |  19 +-
 .../replication/regionserver/Replication.java      |   8 +-
 .../replication/regionserver/ReplicationSink.java  |  48 +-
 .../regionserver/ReplicationSinkManager.java       |   3 +
 .../regionserver/ReplicationSource.java            |  93 +-
 .../regionserver/ReplicationSourceManager.java     |  25 +-
 .../regionserver/ReplicationSourceWALReader.java   |  11 +-
 .../hbase/security/access/ZKPermissionWatcher.java |   4 +-
 .../security/visibility/VisibilityController.java  |  22 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java    | 114 ++-
 .../hadoop/hbase/tool/HFileContentValidator.java   |   4 +-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |   3 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |   4 +-
 .../org/apache/hadoop/hbase/util/JvmVersion.java   |  15 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java       |   3 +-
 .../org/apache/hadoop/hbase/util/MoveWithAck.java  | 158 ++++
 .../apache/hadoop/hbase/util/MoveWithoutAck.java   |  71 ++
 .../org/apache/hadoop/hbase/util/RegionMover.java  | 210 +----
 .../hadoop/hbase/util/ServerRegionReplicaUtil.java |  26 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java    |  17 +-
 .../apache/hadoop/hbase/wal/AbstractWALRoller.java | 130 ++-
 .../hadoop/hbase/wal/DisabledWALProvider.java      |   2 +-
 .../org/apache/hadoop/hbase/wal/OutputSink.java    |   3 +-
 .../main/java/org/apache/hadoop/hbase/wal/WAL.java |   6 +-
 .../org/apache/hadoop/hbase/wal/WALSplitUtil.java  | 105 +--
 .../org/apache/hadoop/hbase/wal/WALSplitter.java   | 288 +++---
 .../resources/hbase-webapps/master/rsgroup.jsp     |  12 +-
 .../main/resources/hbase-webapps/master/table.jsp  |  55 +-
 .../hadoop/hbase/AcidGuaranteesTestTool.java       |   7 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java   | 385 ++++----
 .../hadoop/hbase/TestClientClusterMetrics.java     |   6 +
 ...TestColumnFamilyDescriptorDefaultVersions.java} |  57 +-
 .../org/apache/hadoop/hbase/TestMultiVersions.java |  43 +-
 .../apache/hadoop/hbase/TestRegionRebalancing.java |   9 +-
 .../org/apache/hadoop/hbase/TestSerialization.java |  31 +-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |  87 +-
 .../TestAsyncReplicationAdminApiWithClusters.java  |  14 +-
 .../hbase/client/TestAsyncTableAdminApi3.java      |   6 +-
 .../client/TestAsyncTableGetMultiThreaded.java     |   6 +-
 .../hbase/client/TestBlockEvictionFromClient.java  |   9 +-
 .../hadoop/hbase/client/TestEnableTable.java       |  24 +-
 .../hadoop/hbase/client/TestFlushFromClient.java   |  25 +-
 .../hadoop/hbase/client/TestFromClientSide.java    |  25 +-
 .../hadoop/hbase/client/TestFromClientSide3.java   |  58 +-
 .../hadoop/hbase/client/TestFromClientSide4.java   |  19 +-
 .../hadoop/hbase/client/TestFromClientSide5.java   |  19 +-
 .../hbase/client/TestIllegalTableDescriptor.java   | 142 ++-
 .../hbase/client/TestIncrementsFromClientSide.java |  12 +-
 .../hbase/client/TestIntraRowPagination.java       |  11 +-
 .../hbase/client/TestReplicaWithCluster.java       | 101 +--
 .../hadoop/hbase/client/TestReplicasClient.java    |  11 +-
 .../hbase/client/TestScannersFromClientSide.java   |   3 +-
 .../hbase/client/TestServerBusyException.java      |   6 +-
 .../hadoop/hbase/client/TestSizeFailures.java      |  10 +-
 .../client/TestSnapshotCloneIndependence.java      |  18 +-
 .../hbase/client/TestSnapshotFromClient.java       |  11 +-
 .../hadoop/hbase/client/TestSnapshotMetadata.java  |  59 +-
 .../client/TestSnapshotTemporaryDirectory.java     |   7 +-
 .../hadoop/hbase/constraint/TestConstraint.java    |  95 +-
 .../hadoop/hbase/constraint/TestConstraints.java   | 136 ++-
 .../hbase/coprocessor/SimpleRegionObserver.java    |  39 +
 .../coprocessor/TestCoprocessorConfiguration.java  |   8 +-
 .../coprocessor/TestCoprocessorInterface.java      |  13 +-
 .../hbase/coprocessor/TestCoprocessorMetrics.java  |  73 +-
 .../TestMasterCoprocessorExceptionWithAbort.java   |   9 +-
 .../TestMasterCoprocessorExceptionWithRemove.java  |  15 +-
 .../hbase/coprocessor/TestMasterObserver.java      |  30 +-
 .../coprocessor/TestOpenTableInCoprocessor.java    |  27 +-
 ...ObserverForAddingMutationsFromCoprocessors.java |  10 +-
 .../coprocessor/TestRegionObserverInterface.java   |  82 +-
 .../TestRegionObserverScannerOpenHook.java         |  37 +-
 .../coprocessor/TestRegionObserverStacking.java    |  15 +-
 .../hadoop/hbase/filter/FilterTestingCluster.java  |  10 +-
 .../hbase/filter/TestDependentColumnFilter.java    |  20 +-
 .../org/apache/hadoop/hbase/filter/TestFilter.java |  45 +-
 .../hbase/filter/TestFilterFromRegionSide.java     |  11 +-
 .../hadoop/hbase/filter/TestFilterWrapper.java     |  16 +-
 .../hbase/filter/TestInvocationRecordFilter.java   |  10 +-
 .../org/apache/hadoop/hbase/io/TestHeapSize.java   |  16 +-
 .../io/encoding/TestSeekBeforeWithReverseScan.java |  10 +-
 .../hfile/TestScannerSelectionUsingKeyRange.java   |  12 +-
 .../hadoop/hbase/master/AbstractTestDLS.java       | 167 +---
 .../org/apache/hadoop/hbase/master/TestMaster.java |   8 +-
 .../hbase/master/TestMasterMetricsWrapper.java     |   8 +-
 .../hadoop/hbase/master/TestRegionPlacement.java   |   8 +-
 .../hbase/master/TestRegionPlansWithThrottle.java  |   6 +-
 .../master/assignment/MockMasterServices.java      |  18 +-
 .../hbase/master/assignment/TestRegionStates.java  |  12 +-
 .../balancer/TestFavoredNodeTableImport.java       |  13 +-
 .../TestFavoredStochasticLoadBalancer.java         |  80 +-
 .../TestDeleteColumnFamilyProcedureFromClient.java |  16 +-
 .../master/procedure/TestModifyTableProcedure.java |  83 +-
 .../procedure/TestRestoreSnapshotProcedure.java    |  55 +-
 .../TestTableDescriptorModificationFromClient.java |  94 +-
 .../hadoop/hbase/mob/MobStressToolRunner.java      |  21 +-
 .../hbase/mob/TestDefaultMobStoreFlusher.java      |  83 +-
 .../hadoop/hbase/mob/TestMobCompactionOptMode.java |   7 +-
 .../mob/TestMobCompactionOptRegionBatchMode.java   |   1 -
 .../TestMobCompactionRegularRegionBatchMode.java   |   1 -
 .../hbase/mob/TestMobCompactionWithDefaults.java   |  17 +-
 .../hadoop/hbase/mob/TestMobDataBlockEncoding.java |  20 +-
 .../hadoop/hbase/mob/TestMobFileCleanerChore.java  |  19 +-
 .../hadoop/hbase/mob/TestMobStoreCompaction.java   |  15 +-
 .../hadoop/hbase/mob/TestMobStoreScanner.java      |  19 +-
 .../hbase/namespace/TestNamespaceAuditor.java      |  13 +-
 .../hbase/procedure/SimpleRSProcedureManager.java  |  15 +-
 .../hbase/quotas/SpaceQuotaHelperForTests.java     |  64 +-
 .../hadoop/hbase/quotas/TestSpaceQuotaRemoval.java |  58 ++
 .../hbase/regionserver/TestAtomicOperation.java    |  23 +-
 .../hbase/regionserver/TestBlocksScanned.java      |  37 +-
 .../hadoop/hbase/regionserver/TestBulkLoad.java    |   9 +-
 .../hbase/regionserver/TestCleanupMetaWAL.java     |   2 +-
 .../regionserver/TestClearRegionBlockCache.java    |  37 +-
 .../hbase/regionserver/TestColumnSeeking.java      |  13 +-
 .../hbase/regionserver/TestCompactSplitThread.java |  14 +-
 .../hbase/regionserver/TestCompactingMemStore.java |  11 +-
 .../hadoop/hbase/regionserver/TestCompaction.java  |  19 +-
 .../hbase/regionserver/TestCompactionPolicy.java   |  13 +-
 .../hbase/regionserver/TestDeleteMobTable.java     |  23 +-
 .../hbase/regionserver/TestFSErrorsExposed.java    |  11 +-
 .../hadoop/hbase/regionserver/TestHRegion.java     | 698 +++++++++++++--
 .../hbase/regionserver/TestHRegionOnCluster.java   |   7 +-
 .../hbase/regionserver/TestIsDeleteFailure.java    |   6 +-
 .../hbase/regionserver/TestJoinedScanners.java     |  15 +-
 .../hadoop/hbase/regionserver/TestKeepDeletes.java |  29 +-
 .../hadoop/hbase/regionserver/TestLogRoller.java   | 133 ++-
 .../hbase/regionserver/TestMajorCompaction.java    |  10 +-
 .../regionserver/TestMetricsRegionServer.java      |   3 +-
 .../regionserver/TestMetricsUserAggregate.java     |  14 +-
 .../hbase/regionserver/TestMinorCompaction.java    |  13 +-
 .../hbase/regionserver/TestMultiLogThreshold.java  |   5 +-
 .../hbase/regionserver/TestMutateRowsRecovery.java |   7 +-
 .../TestNewVersionBehaviorFromClientSide.java      |  11 +-
 .../hadoop/hbase/regionserver/TestParallelPut.java |  10 +-
 .../regionserver/TestPerColumnFamilyFlush.java     |  40 +-
 .../hbase/regionserver/TestRSStatusServlet.java    |  15 +-
 .../hadoop/hbase/regionserver/TestRegionOpen.java  |  16 +-
 .../regionserver/TestRegionReplicaFailover.java    |  30 +-
 .../TestRegionReplicasAreDistributed.java          |   7 +-
 .../TestRegionReplicasWithModifyTable.java         |   8 +-
 .../TestRegionServerReportForDuty.java             |   6 +-
 .../hbase/regionserver/TestResettingCounters.java  |  11 +-
 .../hbase/regionserver/TestReversibleScanners.java |  10 +-
 .../hadoop/hbase/regionserver/TestRowTooBig.java   |  27 +-
 .../regionserver/TestSCVFWithMiniCluster.java      |  16 +-
 .../hadoop/hbase/regionserver/TestScanner.java     |  23 +-
 .../regionserver/TestScannerWithCorruptHFile.java  |   9 +-
 .../TestSettingTimeoutOnBlockingPoint.java         |  11 +-
 .../TestSplitTransactionOnCluster.java             |  12 +-
 .../regionserver/TestStoreScannerClosure.java      |   7 +-
 .../apache/hadoop/hbase/regionserver/TestTags.java |  42 +-
 .../TestWalAndCompactingMemStoreFlush.java         |  18 +-
 .../compactions/TestCompactedHFilesDischarger.java |   7 +-
 .../compactions/TestDateTieredCompactor.java       |   4 +-
 .../compactions/TestFIFOCompactionPolicy.java      |   5 +
 .../compactions/TestStripeCompactionPolicy.java    |   5 +-
 .../compactions/TestStripeCompactor.java           |   4 +-
 .../hbase/regionserver/wal/AbstractTestFSWAL.java  | 231 +++--
 .../regionserver/wal/AbstractTestWALReplay.java    | 363 ++++----
 .../hbase/regionserver/wal/TestAsyncFSWAL.java     |   5 +-
 .../hbase/regionserver/wal/TestAsyncWALReplay.java |   5 +-
 .../regionserver/wal/TestSequenceIdAccounting.java |  10 +-
 .../hbase/replication/TestMasterReplication.java   |  17 +-
 .../replication/TestMultiSlaveReplication.java     | 151 ++--
 .../replication/TestPerTableCFReplication.java     | 102 +--
 .../hbase/replication/TestReplicationBase.java     |  33 +-
 .../hbase/replication/TestReplicationEndpoint.java |  45 +-
 .../hbase/replication/TestReplicationWithTags.java |  14 +-
 .../TestGlobalReplicationThrottler.java            |  23 +-
 .../TestRegionReplicaReplicationEndpoint.java      | 202 ++---
 ...stRegionReplicaReplicationEndpointNoMaster.java |  10 +-
 .../regionserver/TestReplicationSink.java          |  48 +-
 .../regionserver/TestReplicationSource.java        | 250 ++++--
 .../regionserver/TestWALEntrySinkFilter.java       |   2 +-
 .../regionserver/TestWALEntryStream.java           |   5 +
 .../hbase/rsgroup/VerifyingRSGroupAdmin.java       |   4 +
 .../security/access/TestAccessController.java      |  84 +-
 .../security/access/TestAccessController2.java     |  22 +-
 .../security/access/TestAccessController3.java     |  22 +-
 .../access/TestCellACLWithMultipleVersions.java    |  20 +-
 .../hadoop/hbase/security/access/TestCellACLs.java |  14 +-
 .../TestCoprocessorWhitelistMasterObserver.java    |  48 +-
 .../security/access/TestNamespaceCommands.java     |   8 +-
 .../security/access/TestScanEarlyTermination.java  |  26 +-
 .../access/TestWithDisabledAuthorization.java      |  38 +-
 .../token/TestDelegationTokenWithEncryption.java   |   8 +-
 ...tVisibilityLabelReplicationWithExpAsString.java |  10 +-
 .../security/visibility/TestVisibilityLabels.java  |  35 +-
 .../TestVisibilityLabelsReplication.java           |  10 +-
 .../visibility/TestVisibilityWithCheckAuths.java   |  10 +-
 .../hadoop/hbase/util/BaseTestHBaseFsck.java       |   5 +-
 .../apache/hadoop/hbase/util/HFileTestUtil.java    |   5 +-
 .../apache/hadoop/hbase/util/TestHBaseFsckMOB.java |   6 +-
 .../hbase/util/TestMiniClusterLoadSequential.java  |   9 +-
 ...{TestRegionMover.java => TestRegionMover1.java} |  31 +-
 .../apache/hadoop/hbase/util/TestRegionMover2.java | 215 +++++
 .../hadoop/hbase/wal/TestFSHLogProvider.java       |   7 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java    |   4 +-
 .../hadoop/hbase/wal/TestWALReaderOnSecureWAL.java |  22 +-
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java  |  56 +-
 hbase-shell/src/main/ruby/hbase/admin.rb           |   9 +-
 hbase-shell/src/main/ruby/hbase/quotas.rb          |   3 +
 hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb   |   8 +-
 hbase-shell/src/main/ruby/hbase/security.rb        |   2 -
 hbase-shell/src/main/ruby/hbase/table.rb           |  89 +-
 hbase-shell/src/main/ruby/hbase/taskmonitor.rb     |  25 +-
 hbase-shell/src/main/ruby/hbase_constants.rb       |   4 +-
 hbase-shell/src/main/ruby/shell.rb                 | 130 ++-
 .../ruby/shell/commands/append_peer_namespaces.rb  |   2 +-
 .../ruby/shell/commands/clear_compaction_queues.rb |   2 +-
 .../main/ruby/shell/commands/clear_deadservers.rb  |   2 +-
 .../src/main/ruby/shell/commands/clone_snapshot.rb |   2 +-
 .../main/ruby/shell/commands/clone_table_schema.rb |   2 +-
 .../src/main/ruby/shell/commands/compact.rb        |   2 +-
 .../src/main/ruby/shell/commands/compact_rs.rb     |   2 +-
 .../main/ruby/shell/commands/compaction_state.rb   |   2 +-
 .../main/ruby/shell/commands/compaction_switch.rb  |   2 +-
 .../shell/commands/decommission_regionservers.rb   |   2 +-
 .../src/main/ruby/shell/commands/describe.rb       |   2 +-
 .../main/ruby/shell/commands/describe_namespace.rb |   2 +-
 .../ruby/shell/commands/disable_rpc_throttle.rb    |   2 +-
 .../ruby/shell/commands/enable_rpc_throttle.rb     |   2 +-
 hbase-shell/src/main/ruby/shell/commands/flush.rb  |   4 +-
 .../main/ruby/shell/commands/get_peer_config.rb    |   2 +-
 .../src/main/ruby/shell/commands/hbck_chore_run.rb |   3 +-
 .../main/ruby/shell/commands/list_deadservers.rb   |   2 +-
 .../commands/list_decommissioned_regionservers.rb  |   2 +-
 .../main/ruby/shell/commands/list_peer_configs.rb  |   2 +-
 .../src/main/ruby/shell/commands/list_peers.rb     |   2 +-
 .../src/main/ruby/shell/commands/list_regions.rb   |  22 +-
 .../src/main/ruby/shell/commands/major_compact.rb  |   2 +-
 .../ruby/shell/commands/move_namespaces_rsgroup.rb |   2 +-
 .../commands/move_servers_namespaces_rsgroup.rb    |   2 +-
 .../src/main/ruby/shell/commands/processlist.rb    |   4 +
 .../shell/commands/recommission_regionserver.rb    |   2 +-
 .../ruby/shell/commands/remove_peer_namespaces.rb  |   2 +-
 .../main/ruby/shell/commands/restore_snapshot.rb   |   2 +-
 .../shell/commands/set_peer_exclude_namespaces.rb  |   2 +-
 .../shell/commands/set_peer_exclude_tableCFs.rb    |   2 +-
 .../ruby/shell/commands/set_peer_namespaces.rb     |   2 +-
 .../ruby/shell/commands/set_peer_replicate_all.rb  |   2 +-
 .../main/ruby/shell/commands/set_peer_serial.rb    |   2 +-
 .../main/ruby/shell/commands/set_peer_tableCFs.rb  |   2 +-
 .../src/main/ruby/shell/commands/set_quota.rb      |  29 +-
 .../main/ruby/shell/commands/show_peer_tableCFs.rb |   2 +-
 .../ruby/shell/commands/splitormerge_enabled.rb    |   4 +-
 .../ruby/shell/commands/splitormerge_switch.rb     |   4 +-
 .../src/main/ruby/shell/commands/truncate.rb       |   5 +-
 .../main/ruby/shell/commands/truncate_preserve.rb  |   7 +-
 hbase-shell/src/test/ruby/hbase/admin2_test.rb     |   7 +-
 hbase-shell/src/test/ruby/hbase/admin_test.rb      |  18 +-
 .../ruby/hbase/list_regions_test_no_cluster.rb     |   3 +-
 hbase-shell/src/test/ruby/hbase/quotas_test.rb     |   4 +-
 .../src/test/ruby/hbase/quotas_test_no_cluster.rb  |   3 +-
 .../src/test/ruby/hbase/replication_admin_test.rb  |  90 +-
 .../src/test/ruby/hbase/security_admin_test.rb     |   3 +-
 hbase-shell/src/test/ruby/hbase/table_test.rb      |  34 +-
 .../test/ruby/hbase/test_connection_no_cluster.rb  |   3 +-
 .../ruby/hbase/visibility_labels_admin_test.rb     |   3 +-
 hbase-shell/src/test/ruby/shell/commands_test.rb   |  21 +-
 hbase-shell/src/test/ruby/shell/converter_test.rb  |   3 +-
 .../src/test/ruby/shell/list_procedures_test.rb    |   3 +-
 hbase-shell/src/test/ruby/shell/shell_test.rb      |  58 ++
 hbase-shell/src/test/ruby/test_helper.rb           |   2 +-
 .../hadoop/hbase/thrift/IncrementCoalescer.java    |   7 +-
 .../hbase/thrift/ThriftHBaseServiceHandler.java    |  21 +-
 .../hadoop/hbase/thrift/ThriftUtilities.java       |  30 +-
 .../hadoop/hbase/thrift2/ThriftUtilities.java      |  18 +-
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   |   5 +
 .../main/resources/hbase-webapps/thrift/thrift.jsp |   6 +
 .../thrift2/TestThriftHBaseServiceHandler.java     |  37 +-
 .../TestThriftHBaseServiceHandlerWithLabels.java   |  19 +-
 .../TestThriftHBaseServiceHandlerWithReadOnly.java |  19 +-
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   |   4 +-
 .../apache/hadoop/hbase/zookeeper/ZKSplitLog.java  |   3 +
 .../apache/hadoop/hbase/zookeeper/ZKWatcher.java   |   6 +-
 pom.xml                                            |   9 -
 src/main/asciidoc/_chapters/hbase-default.adoc     |   7 +-
 src/main/asciidoc/_chapters/shell.adoc             |   2 +-
 src/site/site.xml                                  |   8 -
 424 files changed, 7470 insertions(+), 8898 deletions(-)
 delete mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
 delete mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
 delete mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java
 delete mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
 delete mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestReusablePoolMap.java
 copy hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/{MetricsReplicationGlobalSourceSource.java => MetricsReplicationGlobalSourceSourceImpl.java} (92%)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java
 rename hbase-server/src/test/java/org/apache/hadoop/hbase/{TestHColumnDescriptorDefaultVersions.java => TestColumnFamilyDescriptorDefaultVersions.java} (72%)
 rename hbase-server/src/test/java/org/apache/hadoop/hbase/util/{TestRegionMover.java => TestRegionMover1.java} (96%)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java


[hbase] 02/02: HBASE-24681 Remove the cache walsById/walsByIdRecoveredQueues from ReplicationSourceManager (#2019)

Posted by zg...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zghao pushed a commit to branch HBASE-24666
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9c61d14c037caf52b7d377893efef72cd4e9dadc
Author: Guanghao Zhang <zg...@apache.org>
AuthorDate: Mon Jul 13 17:35:32 2020 +0800

    HBASE-24681 Remove the cache walsById/walsByIdRecoveredQueues from ReplicationSourceManager (#2019)
    
    Signed-off-by: Wellington Chevreuil <wc...@apache.org>
---
 .../regionserver/ReplicationSourceManager.java     | 204 +++++++--------------
 1 file changed, 62 insertions(+), 142 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 7fcfa55..744f9a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -91,30 +91,6 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto
  * <li>No need synchronized on {@link #sources}. {@link #sources} is a ConcurrentHashMap and there
  * is a Lock for peer id in {@link PeerProcedureHandlerImpl}. So there is no race for peer
  * operations.</li>
- * <li>Need synchronized on {@link #walsById}. There are four methods which modify it,
- * {@link #addPeer(String)}, {@link #removePeer(String)},
- * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and {@link #preLogRoll(Path)}.
- * {@link #walsById} is a ConcurrentHashMap and there is a Lock for peer id in
- * {@link PeerProcedureHandlerImpl}. So there is no race between {@link #addPeer(String)} and
- * {@link #removePeer(String)}. {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)}
- * is called by {@link ReplicationSourceInterface}. So no race with {@link #addPeer(String)}.
- * {@link #removePeer(String)} will terminate the {@link ReplicationSourceInterface} firstly, then
- * remove the wals from {@link #walsById}. So no race with {@link #removePeer(String)}. The only
- * case need synchronized is {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and
- * {@link #preLogRoll(Path)}.</li>
- * <li>No need synchronized on {@link #walsByIdRecoveredQueues}. There are three methods which
- * modify it, {@link #removePeer(String)} ,
- * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and
- * {@link ReplicationSourceManager.NodeFailoverWorker#run()}.
- * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} is called by
- * {@link ReplicationSourceInterface}. {@link #removePeer(String)} will terminate the
- * {@link ReplicationSourceInterface} firstly, then remove the wals from
- * {@link #walsByIdRecoveredQueues}. And {@link ReplicationSourceManager.NodeFailoverWorker#run()}
- * will add the wals to {@link #walsByIdRecoveredQueues} firstly, then start up a
- * {@link ReplicationSourceInterface}. So there is no race here. For
- * {@link ReplicationSourceManager.NodeFailoverWorker#run()} and {@link #removePeer(String)}, there
- * is already synchronized on {@link #oldsources}. So no need synchronized on
- * {@link #walsByIdRecoveredQueues}.</li>
  * <li>Need synchronized on {@link #latestPaths} to avoid the new open source miss new log.</li>
  * <li>Need synchronized on {@link #oldsources} to avoid adding recovered source for the
  * to-be-removed peer.</li>
@@ -135,15 +111,6 @@ public class ReplicationSourceManager implements ReplicationListener {
   // All about stopping
   private final Server server;
 
-  // All logs we are currently tracking
-  // Index structure of the map is: queue_id->logPrefix/logGroup->logs
-  // For normal replication source, the peer id is same with the queue id
-  private final ConcurrentMap<String, Map<String, NavigableSet<String>>> walsById;
-  // Logs for recovered sources we are currently tracking
-  // the map is: queue_id->logPrefix/logGroup->logs
-  // For recovered source, the queue id's format is peer_id-servername-*
-  private final ConcurrentMap<String, Map<String, NavigableSet<String>>> walsByIdRecoveredQueues;
-
   private final SyncReplicationPeerMappingManager syncReplicationPeerMappingManager;
 
   private final Configuration conf;
@@ -199,8 +166,6 @@ public class ReplicationSourceManager implements ReplicationListener {
     this.replicationPeers = replicationPeers;
     this.replicationTracker = replicationTracker;
     this.server = server;
-    this.walsById = new ConcurrentHashMap<>();
-    this.walsByIdRecoveredQueues = new ConcurrentHashMap<>();
     this.oldsources = new ArrayList<>();
     this.conf = conf;
     this.fs = fs;
@@ -338,7 +303,6 @@ public class ReplicationSourceManager implements ReplicationListener {
       // Delete queue from storage and memory and queue id is same with peer id for normal
       // source
       deleteQueue(peerId);
-      this.walsById.remove(peerId);
     }
     ReplicationPeerConfig peerConfig = peer.getPeerConfig();
     if (peerConfig.isSyncReplication()) {
@@ -379,15 +343,10 @@ public class ReplicationSourceManager implements ReplicationListener {
     // synchronized on latestPaths to avoid missing the new log
     synchronized (this.latestPaths) {
       this.sources.put(peerId, src);
-      Map<String, NavigableSet<String>> walsByGroup = new HashMap<>();
-      this.walsById.put(peerId, walsByGroup);
       // Add the latest wal to that source's queue
       if (!latestPaths.isEmpty()) {
         for (Map.Entry<String, Path> walPrefixAndPath : latestPaths.entrySet()) {
           Path walPath = walPrefixAndPath.getValue();
-          NavigableSet<String> wals = new TreeSet<>();
-          wals.add(walPath.getName());
-          walsByGroup.put(walPrefixAndPath.getKey(), wals);
           // Abort RS and throw exception to make add peer failed
           abortAndThrowIOExceptionWhenFail(
             () -> this.queueStorage.addWAL(server.getServerName(), peerId, walPath.getName()));
@@ -441,7 +400,10 @@ public class ReplicationSourceManager implements ReplicationListener {
       // map from walsById since later we may fail to delete them from the replication queue
       // storage, and when we retry next time, we can not know the wal files that need to be deleted
       // from the replication queue storage.
-      walsById.get(peerId).forEach((k, v) -> wals.put(k, new TreeSet<>(v)));
+      this.queueStorage.getWALsInQueue(this.server.getServerName(), peerId).forEach(wal -> {
+        String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal);
+        wals.computeIfAbsent(walPrefix, p -> new TreeSet<>()).add(wal);
+      });
     }
     LOG.info("Startup replication source for " + src.getPeerId());
     src.startup();
@@ -450,15 +412,6 @@ public class ReplicationSourceManager implements ReplicationListener {
         queueStorage.removeWAL(server.getServerName(), peerId, wal);
       }
     }
-    synchronized (walsById) {
-      Map<String, NavigableSet<String>> oldWals = walsById.get(peerId);
-      wals.forEach((k, v) -> {
-        NavigableSet<String> walsByGroup = oldWals.get(k);
-        if (walsByGroup != null) {
-          walsByGroup.removeAll(v);
-        }
-      });
-    }
     // synchronized on oldsources to avoid race with NodeFailoverWorker. Since NodeFailoverWorker is
     // a background task, we will delete the file from replication queue storage under the lock to
     // simplify the logic.
@@ -470,7 +423,6 @@ public class ReplicationSourceManager implements ReplicationListener {
           oldSource.terminate(terminateMessage);
           oldSource.getSourceMetrics().clear();
           queueStorage.removeQueue(server.getServerName(), queueId);
-          walsByIdRecoveredQueues.remove(queueId);
           iter.remove();
         }
       }
@@ -483,7 +435,7 @@ public class ReplicationSourceManager implements ReplicationListener {
    * replication queue storage and only to enqueue all logs to the new replication source
    * @param peerId the id of the replication peer
    */
-  public void refreshSources(String peerId) throws IOException {
+  public void refreshSources(String peerId) throws ReplicationException, IOException {
     String terminateMessage = "Peer " + peerId +
       " state or config changed. Will close the previous replication source and open a new one";
     ReplicationPeer peer = replicationPeers.getPeer(peerId);
@@ -496,9 +448,8 @@ public class ReplicationSourceManager implements ReplicationListener {
         // Do not clear metrics
         toRemove.terminate(terminateMessage, null, false);
       }
-      for (NavigableSet<String> walsByGroup : walsById.get(peerId).values()) {
-        walsByGroup.forEach(wal -> src.enqueueLog(new Path(this.logDir, wal)));
-      }
+      this.queueStorage.getWALsInQueue(this.server.getServerName(), peerId)
+        .forEach(wal -> src.enqueueLog(new Path(this.logDir, wal)));
     }
     LOG.info("Startup replication source for " + src.getPeerId());
     src.startup();
@@ -519,9 +470,8 @@ public class ReplicationSourceManager implements ReplicationListener {
       for (String queueId : previousQueueIds) {
         ReplicationSourceInterface replicationSource = createSource(queueId, peer);
         this.oldsources.add(replicationSource);
-        for (SortedSet<String> walsByGroup : walsByIdRecoveredQueues.get(queueId).values()) {
-          walsByGroup.forEach(wal -> src.enqueueLog(new Path(wal)));
-        }
+        this.queueStorage.getWALsInQueue(this.server.getServerName(), queueId)
+          .forEach(wal -> src.enqueueLog(new Path(wal)));
         toStartup.add(replicationSource);
       }
     }
@@ -541,7 +491,6 @@ public class ReplicationSourceManager implements ReplicationListener {
     LOG.info("Done with the recovered queue {}", src.getQueueId());
     // Delete queue from storage and memory
     deleteQueue(src.getQueueId());
-    this.walsByIdRecoveredQueues.remove(src.getQueueId());
     return true;
   }
 
@@ -564,8 +513,6 @@ public class ReplicationSourceManager implements ReplicationListener {
     this.sources.remove(src.getPeerId());
     // Delete queue from storage and memory
     deleteQueue(src.getQueueId());
-    this.walsById.remove(src.getQueueId());
-
   }
 
   /**
@@ -651,42 +598,19 @@ public class ReplicationSourceManager implements ReplicationListener {
    * @param source the replication source
    */
   @VisibleForTesting
-  void cleanOldLogs(String log, boolean inclusive, ReplicationSourceInterface source) {
-    String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(log);
-    if (source.isRecovered()) {
-      NavigableSet<String> wals = walsByIdRecoveredQueues.get(source.getQueueId()).get(logPrefix);
-      if (wals != null) {
-        NavigableSet<String> walsToRemove = wals.headSet(log, inclusive);
-        if (walsToRemove.isEmpty()) {
-          return;
-        }
-        cleanOldLogs(walsToRemove, source);
-        walsToRemove.clear();
-      }
-    } else {
-      NavigableSet<String> wals;
-      NavigableSet<String> walsToRemove;
-      // synchronized on walsById to avoid race with preLogRoll
-      synchronized (this.walsById) {
-        wals = walsById.get(source.getQueueId()).get(logPrefix);
-        if (wals == null) {
-          return;
-        }
-        walsToRemove = wals.headSet(log, inclusive);
-        if (walsToRemove.isEmpty()) {
-          return;
-        }
-        walsToRemove = new TreeSet<>(walsToRemove);
-      }
-      // cleanOldLogs may spend some time, especially for sync replication where we may want to
-      // remove remote wals as the remote cluster may have already been down, so we do it outside
-      // the lock to avoid block preLogRoll
-      cleanOldLogs(walsToRemove, source);
-      // now let's remove the files in the set
-      synchronized (this.walsById) {
-        wals.removeAll(walsToRemove);
-      }
+  void cleanOldLogs(String log, boolean inclusive,
+    ReplicationSourceInterface source) {
+    NavigableSet<String> walsToRemove;
+    synchronized (this.latestPaths) {
+      walsToRemove = getWalsToRemove(source.getQueueId(), log, inclusive);
     }
+    if (walsToRemove.isEmpty()) {
+      return;
+    }
+    // cleanOldLogs may spend some time, especially for sync replication where we may want to
+    // remove remote wals as the remote cluster may have already been down, so we do it outside
+    // the lock to avoid block preLogRoll
+    cleanOldLogs(walsToRemove, source);
   }
 
   private void removeRemoteWALs(String peerId, String remoteWALDir, Collection<String> wals)
@@ -767,37 +691,6 @@ public class ReplicationSourceManager implements ReplicationListener {
         abortAndThrowIOExceptionWhenFail(
           () -> this.queueStorage.addWAL(server.getServerName(), source.getQueueId(), logName));
       }
-
-      // synchronized on walsById to avoid race with cleanOldLogs
-      synchronized (this.walsById) {
-        // Update walsById map
-        for (Map.Entry<String, Map<String, NavigableSet<String>>> entry : this.walsById
-          .entrySet()) {
-          String peerId = entry.getKey();
-          Map<String, NavigableSet<String>> walsByPrefix = entry.getValue();
-          boolean existingPrefix = false;
-          for (Map.Entry<String, NavigableSet<String>> walsEntry : walsByPrefix.entrySet()) {
-            SortedSet<String> wals = walsEntry.getValue();
-            if (this.sources.isEmpty()) {
-              // If there's no slaves, don't need to keep the old wals since
-              // we only consider the last one when a new slave comes in
-              wals.clear();
-            }
-            if (logPrefix.equals(walsEntry.getKey())) {
-              wals.add(logName);
-              existingPrefix = true;
-            }
-          }
-          if (!existingPrefix) {
-            // The new log belongs to a new group, add it into this peer
-            LOG.debug("Start tracking logs for wal group {} for peer {}", logPrefix, peerId);
-            NavigableSet<String> wals = new TreeSet<>();
-            wals.add(logName);
-            walsByPrefix.put(logPrefix, wals);
-          }
-        }
-      }
-
       // Add to latestPaths
       latestPaths.put(logPrefix, newLog);
     }
@@ -969,18 +862,6 @@ public class ReplicationSourceManager implements ReplicationListener {
                 continue;
               }
             }
-            // track sources in walsByIdRecoveredQueues
-            Map<String, NavigableSet<String>> walsByGroup = new HashMap<>();
-            walsByIdRecoveredQueues.put(queueId, walsByGroup);
-            for (String wal : walsSet) {
-              String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal);
-              NavigableSet<String> wals = walsByGroup.get(walPrefix);
-              if (wals == null) {
-                wals = new TreeSet<>();
-                walsByGroup.put(walPrefix, wals);
-              }
-              wals.add(wal);
-            }
             oldsources.add(src);
             LOG.trace("Added source for recovered queue: " + src.getQueueId());
             for (String wal : walsSet) {
@@ -1012,7 +893,18 @@ public class ReplicationSourceManager implements ReplicationListener {
    * @return a sorted set of wal names
    */
   @VisibleForTesting
-  public Map<String, Map<String, NavigableSet<String>>> getWALs() {
+  public Map<String, Map<String, NavigableSet<String>>> getWALs()
+    throws ReplicationException {
+    Map<String, Map<String, NavigableSet<String>>> walsById = new HashMap<>();
+    for (ReplicationSourceInterface source : sources.values()) {
+      String queueId = source.getQueueId();
+      Map<String, NavigableSet<String>> walsByGroup = new HashMap<>();
+      walsById.put(queueId, walsByGroup);
+      for (String wal : this.queueStorage.getWALsInQueue(this.server.getServerName(), queueId)) {
+        String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal);
+        walsByGroup.computeIfAbsent(walPrefix, p -> new TreeSet<>()).add(wal);
+      }
+    }
     return Collections.unmodifiableMap(walsById);
   }
 
@@ -1021,7 +913,18 @@ public class ReplicationSourceManager implements ReplicationListener {
    * @return a sorted set of wal names
    */
   @VisibleForTesting
-  Map<String, Map<String, NavigableSet<String>>> getWalsByIdRecoveredQueues() {
+  Map<String, Map<String, NavigableSet<String>>> getWalsByIdRecoveredQueues()
+    throws ReplicationException {
+    Map<String, Map<String, NavigableSet<String>>> walsByIdRecoveredQueues = new HashMap<>();
+    for (ReplicationSourceInterface source : oldsources) {
+      String queueId = source.getQueueId();
+      Map<String, NavigableSet<String>> walsByGroup = new HashMap<>();
+      walsByIdRecoveredQueues.put(queueId, walsByGroup);
+      for (String wal : this.queueStorage.getWALsInQueue(this.server.getServerName(), queueId)) {
+        String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal);
+        walsByGroup.computeIfAbsent(walPrefix, p -> new TreeSet<>()).add(wal);
+      }
+    }
     return Collections.unmodifiableMap(walsByIdRecoveredQueues);
   }
 
@@ -1200,4 +1103,21 @@ public class ReplicationSourceManager implements ReplicationListener {
   MetricsReplicationGlobalSourceSource getGlobalMetrics() {
     return this.globalMetrics;
   }
+
+  private NavigableSet<String> getWalsToRemove(String queueId, String log, boolean inclusive) {
+    NavigableSet<String> walsToRemove = new TreeSet<>();
+    String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(log);
+    try {
+      this.queueStorage.getWALsInQueue(this.server.getServerName(), queueId).forEach(wal -> {
+        String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal);
+        if (walPrefix.equals(logPrefix)) {
+          walsToRemove.add(wal);
+        }
+      });
+    } catch (ReplicationException e) {
+      // Just log the exception here, as the recovered replication source will try to cleanup again.
+      LOG.warn("Failed to read wals in queue {}", queueId, e);
+    }
+    return walsToRemove.headSet(log, inclusive);
+  }
 }


[hbase] 01/02: HBASE-24682 Refactor ReplicationSource#addHFileRefs method: move it to ReplicationSourceManager (#2020)

Posted by zg...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zghao pushed a commit to branch HBASE-24666
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d45171fce92ce9be46cbd1eb5ec6e485cdbf1f03
Author: Guanghao Zhang <zg...@apache.org>
AuthorDate: Wed Jul 8 14:29:08 2020 +0800

    HBASE-24682 Refactor ReplicationSource#addHFileRefs method: move it to ReplicationSourceManager (#2020)
    
    Signed-off-by: Wellington Chevreuil <wc...@apache.org>
---
 .../regionserver/ReplicationSource.java            | 38 +----------------
 .../regionserver/ReplicationSourceInterface.java   | 14 -------
 .../regionserver/ReplicationSourceManager.java     | 48 +++++++++++++++++++++-
 .../hbase/replication/ReplicationSourceDummy.java  |  9 +---
 4 files changed, 49 insertions(+), 60 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f24ecfa..85c4657 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -28,7 +28,6 @@ import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
@@ -37,6 +36,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Predicate;
+
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -46,21 +46,17 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
 import org.apache.hadoop.hbase.replication.ChainWALEntryFilter;
 import org.apache.hadoop.hbase.replication.ClusterMarkingEntryFilter;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
@@ -265,38 +261,6 @@ public class ReplicationSource implements ReplicationSourceInterface {
     }
   }
 
-  @Override
-  public void addHFileRefs(TableName tableName, byte[] family, List<Pair<Path, Path>> pairs)
-      throws ReplicationException {
-    String peerId = replicationPeer.getId();
-    Set<String> namespaces = replicationPeer.getNamespaces();
-    Map<TableName, List<String>> tableCFMap = replicationPeer.getTableCFs();
-    if (tableCFMap != null) { // All peers with TableCFs
-      List<String> tableCfs = tableCFMap.get(tableName);
-      if (tableCFMap.containsKey(tableName)
-          && (tableCfs == null || tableCfs.contains(Bytes.toString(family)))) {
-        this.queueStorage.addHFileRefs(peerId, pairs);
-        metrics.incrSizeOfHFileRefsQueue(pairs.size());
-      } else {
-        LOG.debug("HFiles will not be replicated belonging to the table {} family {} to peer id {}",
-            tableName, Bytes.toString(family), peerId);
-      }
-    } else if (namespaces != null) { // Only for set NAMESPACES peers
-      if (namespaces.contains(tableName.getNamespaceAsString())) {
-        this.queueStorage.addHFileRefs(peerId, pairs);
-        metrics.incrSizeOfHFileRefsQueue(pairs.size());
-      } else {
-        LOG.debug("HFiles will not be replicated belonging to the table {} family {} to peer id {}",
-            tableName, Bytes.toString(family), peerId);
-      }
-    } else {
-      // user has explicitly not defined any table cfs for replication, means replicate all the
-      // data
-      this.queueStorage.addHFileRefs(peerId, pairs);
-      metrics.incrSizeOfHFileRefsQueue(pairs.size());
-    }
-  }
-
   private ReplicationEndpoint createReplicationEndpoint()
       throws InstantiationException, IllegalAccessException, ClassNotFoundException, IOException {
     RegionServerCoprocessorHost rsServerHost = null;
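
The method removed above reappears, parameterized by peer id, in ReplicationSourceManager further down. Its three-way scoping rule is easy to lose in the diff noise, so here is a standalone distillation of that rule; the class and method names are hypothetical, not HBase API:

    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    /** Illustrative only: mirrors the scoping applied before HFile refs are queued for a peer. */
    final class PeerScopeSketch {

      static boolean peerWantsHFiles(String namespace, String table, String family,
          Map<String, List<String>> tableCfs, Set<String> namespaces) {
        if (tableCfs != null) {
          // Peer configured with explicit table-cfs: the table must be listed, and the family
          // must either be unrestricted (null list) or named explicitly.
          List<String> cfs = tableCfs.get(table);
          return tableCfs.containsKey(table) && (cfs == null || cfs.contains(family));
        }
        if (namespaces != null) {
          // Namespace-scoped peer: only tables inside the configured namespaces replicate.
          return namespaces.contains(namespace);
        }
        // Neither table-cfs nor namespaces configured: replicate everything.
        return true;
      }

      private PeerScopeSketch() {
      }
    }

When the check passes, the real code adds the pairs to queue storage and bumps the per-peer HFile-refs metric; otherwise it only logs at DEBUG.
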
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
index 0bd90cf..33a413f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
@@ -29,12 +29,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -63,17 +60,6 @@ public interface ReplicationSourceInterface {
   void enqueueLog(Path log);
 
   /**
-   * Add hfile names to the queue to be replicated.
-   * @param tableName Name of the table these files belongs to
-   * @param family Name of the family these files belong to
-   * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir which
-   *          will be added in the queue for replication}
-   * @throws ReplicationException If failed to add hfile references
-   */
-  void addHFileRefs(TableName tableName, byte[] family, List<Pair<Path, Path>> pairs)
-      throws ReplicationException;
-
-  /**
    * Start the replication
    */
   void startup();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 2cf91ed..7fcfa55 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider;
@@ -173,6 +174,8 @@ public class ReplicationSourceManager implements ReplicationListener {
   private final long totalBufferLimit;
   private final MetricsReplicationGlobalSourceSource globalMetrics;
 
+  private final Map<String, MetricsSource> sourceMetrics = new HashMap<>();
+
   /**
    * Creates a replication manager and sets the watch on all the other registered region servers
    * @param queueStorage the interface for manipulating replication queues
@@ -355,6 +358,7 @@ public class ReplicationSourceManager implements ReplicationListener {
     ReplicationSourceInterface src = ReplicationSourceFactory.create(conf, queueId);
 
     MetricsSource metrics = new MetricsSource(queueId);
+    sourceMetrics.put(queueId, metrics);
     // init replication source
     src.init(conf, fs, this, queueStorage, replicationPeer, server, queueId, clusterId,
       walFileLengthProvider, metrics);
@@ -1139,7 +1143,49 @@ public class ReplicationSourceManager implements ReplicationListener {
   public void addHFileRefs(TableName tableName, byte[] family, List<Pair<Path, Path>> pairs)
       throws IOException {
     for (ReplicationSourceInterface source : this.sources.values()) {
-      throwIOExceptionWhenFail(() -> source.addHFileRefs(tableName, family, pairs));
+      throwIOExceptionWhenFail(() -> addHFileRefs(source.getPeerId(), tableName, family, pairs));
+    }
+  }
+
+  /**
+   * Add hfile names to the queue to be replicated.
+   * @param peerId the replication peer id
+   * @param tableName Name of the table these files belongs to
+   * @param family Name of the family these files belong to
+   * @param pairs list of pairs of { HFile location in staging dir, HFile path in region dir which
+   *          will be added in the queue for replication}
+   * @throws ReplicationException If failed to add hfile references
+   */
+  private void addHFileRefs(String peerId, TableName tableName, byte[] family,
+    List<Pair<Path, Path>> pairs) throws ReplicationException {
+    // Only the normal replication source update here, its peerId is equals to queueId.
+    MetricsSource metrics = sourceMetrics.get(peerId);
+    ReplicationPeer replicationPeer = replicationPeers.getPeer(peerId);
+    Set<String> namespaces = replicationPeer.getNamespaces();
+    Map<TableName, List<String>> tableCFMap = replicationPeer.getTableCFs();
+    if (tableCFMap != null) { // All peers with TableCFs
+      List<String> tableCfs = tableCFMap.get(tableName);
+      if (tableCFMap.containsKey(tableName)
+        && (tableCfs == null || tableCfs.contains(Bytes.toString(family)))) {
+        this.queueStorage.addHFileRefs(peerId, pairs);
+        metrics.incrSizeOfHFileRefsQueue(pairs.size());
+      } else {
+        LOG.debug("HFiles will not be replicated belonging to the table {} family {} to peer id {}",
+          tableName, Bytes.toString(family), peerId);
+      }
+    } else if (namespaces != null) { // Only for set NAMESPACES peers
+      if (namespaces.contains(tableName.getNamespaceAsString())) {
+        this.queueStorage.addHFileRefs(peerId, pairs);
+        metrics.incrSizeOfHFileRefsQueue(pairs.size());
+      } else {
+        LOG.debug("HFiles will not be replicated belonging to the table {} family {} to peer id {}",
+          tableName, Bytes.toString(family), peerId);
+      }
+    } else {
+      // user has explicitly not defined any table cfs for replication, means replicate all the
+      // data
+      this.queueStorage.addHFileRefs(peerId, pairs);
+      metrics.incrSizeOfHFileRefsQueue(pairs.size());
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
index a361c44..781a1da 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
@@ -21,17 +21,16 @@ import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager;
 import org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 
 /**
@@ -114,12 +113,6 @@ public class ReplicationSourceDummy implements ReplicationSourceInterface {
   }
 
   @Override
-  public void addHFileRefs(TableName tableName, byte[] family, List<Pair<Path, Path>> files)
-      throws ReplicationException {
-    return;
-  }
-
-  @Override
   public boolean isPeerEnabled() {
     return true;
   }
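
Two smaller consequences of the move are visible above: ReplicationSourceInterface, and therefore test doubles such as ReplicationSourceDummy, loses a method; and ReplicationSourceManager starts remembering the MetricsSource it creates per queue so the relocated addHFileRefs can still increment the per-peer HFile-refs gauge. For normal (non-recovered) sources the queue id equals the peer id, which is why the lookup by peer id works. A hedged sketch of that bookkeeping pattern, with a plain counter standing in for MetricsSource and hypothetical method names:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    /** Illustrative only: the per-queue metrics registry pattern used by the manager above. */
    final class SourceMetricsSketch {

      // Keyed by queue id; for normal (non-recovered) sources this is also the peer id.
      private final Map<String, Long> hfileRefsQueueSize = new ConcurrentHashMap<>();

      /** Called when a source is created, mirroring sourceMetrics.put(queueId, metrics). */
      void onSourceCreated(String queueId) {
        hfileRefsQueueSize.putIfAbsent(queueId, 0L);
      }

      /** Called when HFile refs are queued, mirroring metrics.incrSizeOfHFileRefsQueue(n). */
      void onHFileRefsQueued(String peerId, int count) {
        hfileRefsQueueSize.merge(peerId, (long) count, Long::sum);
      }

      long sizeOfHFileRefsQueue(String peerId) {
        return hfileRefsQueueSize.getOrDefault(peerId, 0L);
      }
    }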