Posted to commits@hbase.apache.org by zh...@apache.org on 2022/10/06 10:17:43 UTC
[hbase] branch master updated: HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)
This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
new 63cdd026f08 HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)
63cdd026f08 is described below
commit 63cdd026f08cdde6ac0fde1342ffd050e8e02441
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Oct 6 18:17:34 2022 +0800
HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)
Signed-off-by: Andrew Purtell <ap...@apache.org>
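For readers skimming the large diff below, here is a minimal before/after sketch of the pattern this commit removes. The class and method names are illustrative only; the javadoc text mirrors the first hunk below, where a stray 'n' (apparently left behind by an earlier automated javadoc rewrite) dangles at the end of a tag line, sometimes having swallowed a following tag such as @return or @throws.

class JavadocCleanupSketch {

  // Before: the broken form, with a dangling 'n' at the end of the @param line.
  /**
   * Create a ByteString from byte array without copying (wrap).
   * @param payload byte array of payload. n
   */
  void before(byte[] payload) {
  }

  // After: the stray token is dropped; where the 'n' had swallowed a tag,
  // that tag (for example, @throws IOException) is restored on its own line.
  /**
   * Create a ByteString from byte array without copying (wrap).
   * @param payload byte array of payload.
   */
  void after(byte[] payload) {
  }
}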
---
.../FanOutOneBlockAsyncDFSOutputSaslHelper.java | 2 +-
.../hadoop/hbase/backup/impl/BackupManager.java | 5 +-
.../hbase/favored/FavoredNodeAssignmentHelper.java | 8 +-
.../hadoop/hbase/favored/FavoredNodesPlan.java | 2 +-
.../hbase/master/AssignmentVerificationReport.java | 5 +-
.../java/org/apache/hadoop/hbase/ClusterId.java | 7 +-
.../org/apache/hadoop/hbase/HRegionLocation.java | 4 +-
.../hbase/NotAllMetaRegionsOnlineException.java | 3 +-
.../org/apache/hadoop/hbase/ServerMetrics.java | 2 +-
.../java/org/apache/hadoop/hbase/client/Admin.java | 19 +--
.../org/apache/hadoop/hbase/client/Append.java | 10 +-
.../org/apache/hadoop/hbase/client/AsyncAdmin.java | 12 +-
.../hadoop/hbase/client/AsyncConnectionImpl.java | 2 +-
.../hbase/client/ColumnFamilyDescriptor.java | 2 +-
.../client/ColumnFamilyDescriptorBuilder.java | 4 +-
.../org/apache/hadoop/hbase/client/Delete.java | 4 +-
.../java/org/apache/hadoop/hbase/client/Get.java | 16 +--
.../org/apache/hadoop/hbase/client/Increment.java | 10 +-
.../hadoop/hbase/client/MutableRegionInfo.java | 6 +-
.../org/apache/hadoop/hbase/client/Mutation.java | 31 ++---
.../org/apache/hadoop/hbase/client/Operation.java | 2 +-
.../hbase/client/OperationWithAttributes.java | 2 +-
.../java/org/apache/hadoop/hbase/client/Put.java | 13 +-
.../java/org/apache/hadoop/hbase/client/Query.java | 9 +-
.../hadoop/hbase/client/RegionInfoDisplay.java | 10 +-
.../hadoop/hbase/client/RegionReplicaUtil.java | 2 +-
.../org/apache/hadoop/hbase/client/Result.java | 17 +--
.../apache/hadoop/hbase/client/ResultScanner.java | 2 +-
.../apache/hadoop/hbase/client/RowMutations.java | 4 +-
.../java/org/apache/hadoop/hbase/client/Scan.java | 74 ++++++------
.../java/org/apache/hadoop/hbase/client/Table.java | 7 +-
.../hadoop/hbase/client/TableDescriptor.java | 2 +-
.../hbase/client/TableDescriptorBuilder.java | 13 +-
.../org/apache/hadoop/hbase/client/TableState.java | 10 +-
.../hbase/client/backoff/ServerStatistics.java | 2 +-
.../client/metrics/ServerSideScanMetrics.java | 18 +--
.../hbase/coprocessor/ColumnInterpreter.java | 43 +++----
.../hbase/coprocessor/CoprocessorException.java | 2 +-
.../hbase/exceptions/ClientExceptionsUtil.java | 2 +-
.../exceptions/FailedSanityCheckException.java | 6 +-
.../hadoop/hbase/filter/ColumnValueFilter.java | 4 +-
.../org/apache/hadoop/hbase/filter/Filter.java | 5 +-
.../org/apache/hadoop/hbase/filter/FilterBase.java | 6 +-
.../org/apache/hadoop/hbase/filter/FilterList.java | 6 +-
.../apache/hadoop/hbase/filter/FilterListBase.java | 2 +-
.../apache/hadoop/hbase/filter/FuzzyRowFilter.java | 7 +-
.../hadoop/hbase/filter/RandomRowFilter.java | 4 +-
.../filter/SingleColumnValueExcludeFilter.java | 2 +-
.../hbase/filter/SingleColumnValueFilter.java | 2 +-
.../hadoop/hbase/filter/TimestampsFilter.java | 2 +-
.../apache/hadoop/hbase/ipc/CellBlockBuilder.java | 7 +-
.../hadoop/hbase/regionserver/LeaseException.java | 3 +-
.../regionserver/wal/FailedLogCloseException.java | 3 +-
.../wal/FailedSyncBeforeLogCloseException.java | 3 +-
.../hbase/security/AbstractHBaseSaslRpcClient.java | 4 +-
.../hadoop/hbase/security/EncryptionUtil.java | 4 +-
.../hadoop/hbase/security/HBaseSaslRpcClient.java | 6 +-
.../hbase/security/access/AccessControlClient.java | 12 +-
.../hbase/security/access/AccessControlUtil.java | 39 +++---
.../security/visibility/VisibilityClient.java | 12 +-
.../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 134 ++++++++++-----------
.../hbase/shaded/protobuf/RequestConverter.java | 97 +++++++++------
.../hbase/shaded/protobuf/ResponseConverter.java | 22 ++--
.../org/apache/hadoop/hbase/util/Writables.java | 4 +-
.../apache/hadoop/hbase/zookeeper/ZNodePaths.java | 4 +-
.../hadoop/hbase/client/TestDeleteTimeStamp.java | 2 +-
.../java/org/apache/hadoop/hbase/AuthUtil.java | 6 +-
.../hadoop/hbase/ByteBufferKeyOnlyKeyValue.java | 2 +-
.../java/org/apache/hadoop/hbase/CellBuilder.java | 2 +-
.../apache/hadoop/hbase/CellComparatorImpl.java | 5 +-
.../java/org/apache/hadoop/hbase/CellUtil.java | 19 ++-
.../apache/hadoop/hbase/CompoundConfiguration.java | 2 +-
.../java/org/apache/hadoop/hbase/ExtendedCell.java | 2 +-
.../apache/hadoop/hbase/HBaseConfiguration.java | 2 +-
.../java/org/apache/hadoop/hbase/KeyValue.java | 4 +-
.../org/apache/hadoop/hbase/KeyValueTestUtil.java | 4 +-
.../java/org/apache/hadoop/hbase/KeyValueUtil.java | 63 +++++-----
.../org/apache/hadoop/hbase/PrivateCellUtil.java | 72 ++++++-----
.../org/apache/hadoop/hbase/codec/BaseDecoder.java | 2 +-
.../org/apache/hadoop/hbase/codec/CellCodec.java | 2 +-
.../hadoop/hbase/codec/CellCodecWithTags.java | 2 +-
.../hadoop/hbase/io/ByteBufferOutputStream.java | 2 +-
.../apache/hadoop/hbase/io/CellOutputStream.java | 4 +-
.../hadoop/hbase/io/ImmutableBytesWritable.java | 5 +-
.../hadoop/hbase/io/TagCompressionContext.java | 8 +-
.../org/apache/hadoop/hbase/io/crypto/Cipher.java | 8 +-
.../apache/hadoop/hbase/io/crypto/Decryptor.java | 6 +-
.../apache/hadoop/hbase/io/crypto/Encryption.java | 22 ++--
.../apache/hadoop/hbase/io/crypto/Encryptor.java | 6 +-
.../apache/hadoop/hbase/io/crypto/KeyProvider.java | 6 +-
.../hadoop/hbase/io/encoding/DataBlockEncoder.java | 7 +-
.../hbase/io/encoding/DataBlockEncoding.java | 2 +-
.../hadoop/hbase/io/encoding/EncodedDataBlock.java | 2 +-
.../io/encoding/HFileBlockDecodingContext.java | 6 +-
.../hbase/io/encoding/IndexBlockEncoding.java | 2 +-
.../io/hadoopbackport/ThrottledInputStream.java | 3 +-
.../apache/hadoop/hbase/io/util/Dictionary.java | 4 +-
.../apache/hadoop/hbase/io/util/StreamUtils.java | 4 +-
.../java/org/apache/hadoop/hbase/nio/ByteBuff.java | 21 ++--
.../org/apache/hadoop/hbase/nio/MultiByteBuff.java | 36 +++---
.../org/apache/hadoop/hbase/security/User.java | 2 +-
.../apache/hadoop/hbase/security/UserProvider.java | 2 +-
.../hbase/util/AbstractPositionedByteRange.java | 4 +-
.../apache/hadoop/hbase/util/ByteBufferUtils.java | 4 +-
.../org/apache/hadoop/hbase/util/ByteRange.java | 11 +-
.../java/org/apache/hadoop/hbase/util/Bytes.java | 17 +--
.../org/apache/hadoop/hbase/util/ChecksumType.java | 8 +-
.../java/org/apache/hadoop/hbase/util/Classes.java | 7 +-
.../apache/hadoop/hbase/util/CommonFSUtils.java | 2 +-
.../hadoop/hbase/util/CoprocessorClassLoader.java | 2 +-
.../org/apache/hadoop/hbase/util/KeyLocker.java | 2 +-
.../java/org/apache/hadoop/hbase/util/MD5Hash.java | 8 +-
.../java/org/apache/hadoop/hbase/util/Pair.java | 4 +-
.../apache/hadoop/hbase/util/PairOfSameType.java | 4 +-
.../hadoop/hbase/util/PositionedByteRange.java | 4 +-
.../apache/hadoop/hbase/util/PrettyPrinter.java | 9 +-
.../hadoop/hbase/util/SimpleMutableByteRange.java | 8 +-
.../util/SimplePositionedMutableByteRange.java | 14 +--
.../apache/hadoop/hbase/util/TimeMeasurable.java | 2 +-
.../org/apache/hadoop/hbase/util/UnsafeAccess.java | 2 +-
.../hadoop/hbase/util/WindowMovingAverage.java | 2 +-
.../apache/hadoop/hbase/zookeeper/ZKConfig.java | 8 +-
.../hadoop/hbase/TestHBaseConfiguration.java | 10 +-
.../hbase/io/crypto/tls/X509TestContext.java | 2 +-
.../hbase/io/crypto/tls/X509TestHelpers.java | 4 +-
.../hadoop/hbase/util/RandomDistribution.java | 15 ++-
.../metrics2/util/MetricSampleQuantiles.java | 4 +-
.../org/apache/hadoop/hbase/http/HttpServer.java | 8 +-
.../hbase/http/ProxyUserAuthenticationFilter.java | 2 +-
.../hadoop/hbase/http/jmx/JMXJsonServlet.java | 4 +-
.../apache/hadoop/hbase/util/JSONMetricUtil.java | 2 +-
.../hadoop/hbase/DistributedHBaseCluster.java | 2 +-
.../hbase/mapreduce/IntegrationTestBulkLoad.java | 2 +-
.../hbase/test/IntegrationTestBigLinkedList.java | 18 ++-
.../hbase/test/IntegrationTestReplication.java | 6 +-
.../org/apache/hadoop/hbase/mapred/Driver.java | 3 -
.../hadoop/hbase/mapred/GroupingTableMap.java | 10 +-
.../hadoop/hbase/mapred/IdentityTableMap.java | 2 +-
.../hadoop/hbase/mapred/IdentityTableReduce.java | 2 +-
.../mapred/MultiTableSnapshotInputFormat.java | 1 -
.../org/apache/hadoop/hbase/mapred/RowCounter.java | 7 +-
.../hadoop/hbase/mapred/TableInputFormatBase.java | 8 +-
.../hadoop/hbase/mapred/TableOutputFormat.java | 3 +-
.../hadoop/hbase/mapred/TableRecordReader.java | 14 ++-
.../hadoop/hbase/mapred/TableRecordReaderImpl.java | 2 -
.../org/apache/hadoop/hbase/mapred/TableSplit.java | 2 +-
.../apache/hadoop/hbase/mapreduce/CellCreator.java | 8 +-
.../apache/hadoop/hbase/mapreduce/HashTable.java | 2 +-
.../apache/hadoop/hbase/mapreduce/ImportTsv.java | 6 +-
.../mapreduce/MultiTableHFileOutputFormat.java | 2 +-
.../hbase/mapreduce/MultiTableOutputFormat.java | 12 +-
.../MultiTableSnapshotInputFormatImpl.java | 8 +-
.../apache/hadoop/hbase/mapreduce/RowCounter.java | 2 +-
.../hbase/mapreduce/TableInputFormatBase.java | 7 +-
.../hadoop/hbase/mapreduce/TableMapReduceUtil.java | 4 +-
.../hadoop/hbase/mapreduce/TableOutputFormat.java | 3 +-
.../hadoop/hbase/mapreduce/TableRecordReader.java | 3 +-
.../hadoop/hbase/mapreduce/TextSortReducer.java | 4 +-
.../hadoop/hbase/mapreduce/TsvImporterMapper.java | 4 +-
.../hbase/mapreduce/TsvImporterTextMapper.java | 4 +-
.../apache/hadoop/hbase/PerformanceEvaluation.java | 13 +-
.../hadoop/hbase/mapred/TestTableInputFormat.java | 18 +--
.../mapreduce/MultiTableInputFormatTestBase.java | 2 +-
.../hbase/mapreduce/TestHFileOutputFormat2.java | 8 +-
.../hadoop/hbase/mapreduce/TestImportExport.java | 16 +--
.../TestImportTSVWithOperationAttributes.java | 8 +-
.../TestImportTSVWithVisibilityLabels.java | 6 +-
.../hadoop/hbase/mapreduce/TestImportTsv.java | 2 +-
.../mapreduce/TestMultiTableInputFormatBase.java | 2 +-
.../mapreduce/TestMultithreadedTableMapper.java | 7 +-
.../hadoop/hbase/mapreduce/TestRowCounter.java | 26 ++--
.../hbase/mapreduce/TestTableInputFormat.java | 25 ++--
.../hadoop/hbase/mapreduce/TestTableMapReduce.java | 4 +-
.../hbase/mapreduce/TestTableMapReduceBase.java | 5 +-
.../store/wal/ProcedureWALPrettyPrinter.java | 3 +-
.../apache/hadoop/hbase/rest/ExistsResource.java | 2 +-
.../apache/hadoop/hbase/rest/MultiRowResource.java | 2 +-
.../hbase/rest/NamespacesInstanceResource.java | 4 +-
.../hadoop/hbase/rest/NamespacesResource.java | 2 +-
.../hadoop/hbase/rest/ProtobufMessageHandler.java | 2 +-
.../org/apache/hadoop/hbase/rest/RESTServlet.java | 2 +-
.../apache/hadoop/hbase/rest/RegionsResource.java | 2 +-
.../org/apache/hadoop/hbase/rest/RootResource.java | 2 +-
.../org/apache/hadoop/hbase/rest/RowResource.java | 2 +-
.../apache/hadoop/hbase/rest/ScannerResource.java | 2 +-
.../hbase/rest/StorageClusterStatusResource.java | 2 +-
.../hbase/rest/StorageClusterVersionResource.java | 2 +-
.../apache/hadoop/hbase/rest/TableResource.java | 2 +-
.../apache/hadoop/hbase/rest/VersionResource.java | 2 +-
.../apache/hadoop/hbase/rest/client/Client.java | 42 +++----
.../rest/filter/RestCsrfPreventionFilter.java | 2 +-
.../apache/hadoop/hbase/rest/model/CellModel.java | 10 +-
.../hbase/rest/model/NamespacesInstanceModel.java | 4 +-
.../hadoop/hbase/rest/model/NamespacesModel.java | 2 +-
.../hadoop/hbase/rest/model/ScannerModel.java | 6 +-
.../hadoop/hbase/rest/model/TableInfoModel.java | 2 +-
.../apache/hadoop/hbase/rest/model/TableModel.java | 2 +-
.../hadoop/hbase/rest/client/RemoteAdmin.java | 16 +--
.../hadoop/hbase/rest/client/TestRemoteTable.java | 2 +-
.../org/apache/hadoop/hbase/HBaseServerBase.java | 2 +-
.../java/org/apache/hadoop/hbase/HealthReport.java | 4 +-
.../org/apache/hadoop/hbase/LocalHBaseCluster.java | 12 +-
.../apache/hadoop/hbase/RegionStateListener.java | 4 +-
.../java/org/apache/hadoop/hbase/SplitLogTask.java | 4 +-
.../apache/hadoop/hbase/backup/HFileArchiver.java | 7 +-
.../hadoop/hbase/client/VersionInfoUtil.java | 3 +-
.../coordination/SplitLogManagerCoordination.java | 2 +-
.../ZKSplitLogManagerCoordination.java | 4 +-
.../coordination/ZkSplitLogWorkerCoordination.java | 4 +-
.../hadoop/hbase/coprocessor/CoprocessorHost.java | 2 +-
.../hadoop/hbase/coprocessor/MasterObserver.java | 41 ++++---
.../hadoop/hbase/coprocessor/RegionObserver.java | 18 +--
.../hbase/errorhandling/ForeignException.java | 8 +-
.../hbase/errorhandling/ForeignExceptionSnare.java | 2 +-
.../apache/hadoop/hbase/executor/EventHandler.java | 2 +-
.../hadoop/hbase/executor/ExecutorService.java | 2 +-
.../java/org/apache/hadoop/hbase/io/Reference.java | 21 ++--
.../apache/hadoop/hbase/io/hfile/BlockCache.java | 2 +-
.../hadoop/hbase/io/hfile/BlockCacheUtil.java | 12 +-
.../io/hfile/BlockCompressedSizePredicator.java | 2 +-
.../apache/hadoop/hbase/io/hfile/CacheConfig.java | 2 +-
.../hbase/io/hfile/CacheableDeserializer.java | 2 +-
.../hbase/io/hfile/CompoundBloomFilterWriter.java | 6 +-
.../hadoop/hbase/io/hfile/HFileBlockIndex.java | 63 +++++-----
.../hbase/io/hfile/HFileDataBlockEncoder.java | 6 +-
.../hadoop/hbase/io/hfile/HFileReaderImpl.java | 12 +-
.../apache/hadoop/hbase/io/hfile/HFileScanner.java | 21 ++--
.../hadoop/hbase/io/hfile/HFileWriterImpl.java | 6 +-
.../hadoop/hbase/io/hfile/InlineBlockWriter.java | 9 +-
.../hbase/io/hfile/NoOpIndexBlockEncoder.java | 8 +-
.../PreviousBlockCompressionRatePredicator.java | 2 +-
.../io/hfile/UncompressedBlockSizePredicator.java | 2 +-
.../hbase/io/hfile/bucket/BucketAllocator.java | 5 +-
.../hadoop/hbase/io/hfile/bucket/BucketCache.java | 5 +-
.../hbase/io/hfile/bucket/ByteBufferIOEngine.java | 6 +-
.../hadoop/hbase/io/hfile/bucket/FileIOEngine.java | 10 +-
.../hbase/io/hfile/bucket/FileMmapIOEngine.java | 6 +-
.../hadoop/hbase/io/hfile/bucket/IOEngine.java | 6 +-
.../hadoop/hbase/io/util/MemorySizeUtil.java | 9 +-
.../apache/hadoop/hbase/ipc/PriorityFunction.java | 6 +-
.../java/org/apache/hadoop/hbase/ipc/RpcCall.java | 2 +-
.../apache/hadoop/hbase/ipc/RpcCallContext.java | 2 +-
.../hadoop/hbase/ipc/RpcSchedulerContext.java | 3 +-
.../org/apache/hadoop/hbase/ipc/RpcServer.java | 13 +-
.../hadoop/hbase/ipc/RpcServerInterface.java | 2 +-
.../hadoop/hbase/ipc/ServerRpcConnection.java | 6 +-
.../hadoop/hbase/ipc/SimpleRpcScheduler.java | 6 +-
.../apache/hadoop/hbase/ipc/SimpleRpcServer.java | 12 +-
.../hadoop/hbase/ipc/SimpleRpcServerResponder.java | 6 +-
.../hbase/ipc/SimpleServerRpcConnection.java | 2 +-
.../hadoop/hbase/master/DrainingServerTracker.java | 2 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 3 +-
.../hadoop/hbase/master/MasterCoprocessorHost.java | 24 ++--
.../hadoop/hbase/master/MasterFileSystem.java | 4 +-
.../hadoop/hbase/master/MasterRpcServices.java | 6 +-
.../apache/hadoop/hbase/master/MasterServices.java | 42 +++----
.../hbase/master/MetricsAssignmentManager.java | 8 +-
.../hbase/master/RegionPlacementMaintainer.java | 15 +--
.../apache/hadoop/hbase/master/ServerManager.java | 14 +--
.../hadoop/hbase/master/SplitLogManager.java | 8 +-
.../assignment/MergeTableRegionsProcedure.java | 2 +-
.../master/procedure/CloneSnapshotProcedure.java | 10 +-
.../master/procedure/EnableTableProcedure.java | 12 +-
.../master/procedure/RestoreSnapshotProcedure.java | 8 +-
.../hbase/master/snapshot/SnapshotManager.java | 29 ++---
.../hadoop/hbase/mob/DefaultMobStoreFlusher.java | 2 +-
.../java/org/apache/hadoop/hbase/mob/MobFile.java | 12 +-
.../org/apache/hadoop/hbase/mob/MobFileCache.java | 2 +-
.../org/apache/hadoop/hbase/mob/MobFileName.java | 20 +--
.../java/org/apache/hadoop/hbase/mob/MobUtils.java | 3 +-
.../hadoop/hbase/monitoring/ThreadMonitoring.java | 2 +-
.../hbase/namespace/NamespaceStateManager.java | 4 +-
.../hbase/procedure/MasterProcedureManager.java | 8 +-
.../apache/hadoop/hbase/procedure/Procedure.java | 14 +--
.../hbase/procedure/ProcedureCoordinator.java | 13 +-
.../hbase/procedure/ProcedureCoordinatorRpcs.java | 4 +-
.../hadoop/hbase/procedure/ProcedureMember.java | 12 +-
.../procedure/RegionServerProcedureManager.java | 4 +-
.../hadoop/hbase/procedure/Subprocedure.java | 12 +-
.../hadoop/hbase/procedure/ZKProcedureUtil.java | 2 +-
.../RegionServerFlushTableProcedureManager.java | 14 +--
.../hbase/protobuf/ReplicationProtobufUtil.java | 4 +-
.../hbase/regionserver/AbstractMemStore.java | 2 +-
.../apache/hadoop/hbase/regionserver/CellSink.java | 2 +-
.../hadoop/hbase/regionserver/ChunkCreator.java | 2 +-
.../hbase/regionserver/CompactingMemStore.java | 2 +-
.../hbase/regionserver/FavoredNodesForRegion.java | 6 +-
.../hadoop/hbase/regionserver/FlushRequester.java | 8 +-
.../hadoop/hbase/regionserver/HMobStore.java | 18 +--
.../apache/hadoop/hbase/regionserver/HRegion.java | 56 ++++-----
.../hbase/regionserver/HRegionFileSystem.java | 42 +++----
.../hadoop/hbase/regionserver/HRegionServer.java | 6 +-
.../hadoop/hbase/regionserver/HStoreFile.java | 6 +-
.../hadoop/hbase/regionserver/HeapMemoryTuner.java | 4 +-
.../hadoop/hbase/regionserver/InternalScan.java | 2 +-
.../hadoop/hbase/regionserver/InternalScanner.java | 6 +-
.../hadoop/hbase/regionserver/KeyValueHeap.java | 14 +--
.../hadoop/hbase/regionserver/KeyValueScanner.java | 2 +-
.../apache/hadoop/hbase/regionserver/MemStore.java | 17 +--
.../hadoop/hbase/regionserver/MemStoreFlusher.java | 20 ++-
.../regionserver/MiniBatchOperationInProgress.java | 16 +--
.../MultiVersionConcurrencyControl.java | 4 +-
.../hbase/regionserver/MutableOnlineRegions.java | 2 +-
.../hadoop/hbase/regionserver/OnlineRegions.java | 8 +-
.../hadoop/hbase/regionserver/OperationStatus.java | 9 +-
.../hadoop/hbase/regionserver/RSRpcServices.java | 22 ++--
.../apache/hadoop/hbase/regionserver/Region.java | 33 ++---
.../hbase/regionserver/RegionCoprocessorHost.java | 36 +++---
.../hadoop/hbase/regionserver/RegionScanner.java | 2 +-
.../hbase/regionserver/RegionSplitPolicy.java | 3 +-
.../hbase/regionserver/ReplicationSinkService.java | 1 -
.../hbase/regionserver/ReversedKeyValueHeap.java | 8 +-
.../hbase/regionserver/ReversedStoreScanner.java | 5 +-
.../apache/hadoop/hbase/regionserver/ScanInfo.java | 4 +-
.../hadoop/hbase/regionserver/ScannerContext.java | 34 ++----
.../hbase/regionserver/SecureBulkLoadManager.java | 4 +-
.../hadoop/hbase/regionserver/SegmentFactory.java | 2 +-
.../hadoop/hbase/regionserver/ShipperListener.java | 2 +-
.../hadoop/hbase/regionserver/ShutdownHook.java | 6 +-
.../apache/hadoop/hbase/regionserver/Store.java | 2 +-
.../hadoop/hbase/regionserver/StoreFileInfo.java | 4 +-
.../hadoop/hbase/regionserver/StoreFileReader.java | 6 +-
.../hbase/regionserver/StoreFileScanner.java | 4 +-
.../hbase/regionserver/StoreFlushContext.java | 6 +-
.../hadoop/hbase/regionserver/StoreScanner.java | 19 +--
.../hbase/regionserver/TimeRangeTracker.java | 2 +-
.../compactions/CompactionProgress.java | 2 +-
.../compactions/ExploringCompactionPolicy.java | 8 +-
.../compactions/SortedCompactionPolicy.java | 4 +-
.../regionserver/querymatcher/ColumnTracker.java | 7 +-
.../querymatcher/ScanDeleteTracker.java | 2 +-
.../querymatcher/ScanQueryMatcher.java | 6 +-
.../querymatcher/ScanWildcardColumnTracker.java | 2 +-
.../snapshot/RegionServerSnapshotManager.java | 16 +--
.../hbase/regionserver/wal/AbstractFSWAL.java | 4 +-
.../hbase/regionserver/wal/ProtobufLogReader.java | 2 +-
.../regionserver/wal/SequenceIdAccounting.java | 7 +-
.../hadoop/hbase/regionserver/wal/SyncFuture.java | 2 +-
.../replication/HBaseReplicationEndpoint.java | 6 +-
.../regionserver/DumpReplicationQueues.java | 2 +-
.../replication/regionserver/MetricsSink.java | 10 +-
.../replication/regionserver/MetricsSource.java | 14 +--
.../replication/regionserver/ReplicationSink.java | 2 +-
.../hadoop/hbase/rsgroup/RSGroupInfoManager.java | 2 +-
.../hbase/security/access/AccessChecker.java | 2 +-
.../hbase/security/access/AccessController.java | 8 +-
.../hadoop/hbase/security/access/AuthManager.java | 2 +-
.../hbase/security/access/ZKPermissionWatcher.java | 4 +-
.../DefaultVisibilityLabelServiceImpl.java | 5 +-
.../security/visibility/ScanLabelGenerator.java | 3 +-
.../security/visibility/VisibilityController.java | 4 +-
.../visibility/VisibilityLabelService.java | 43 ++++---
.../visibility/VisibilityLabelServiceManager.java | 5 +-
.../security/visibility/VisibilityLabelsCache.java | 6 +-
.../hbase/security/visibility/VisibilityUtils.java | 27 +++--
.../visibility/ZKVisibilityLabelWatcher.java | 4 +-
.../hbase/snapshot/RestoreSnapshotHelper.java | 6 +-
.../hbase/snapshot/SnapshotDescriptionUtils.java | 3 +-
.../org/apache/hadoop/hbase/util/BloomContext.java | 4 +-
.../org/apache/hadoop/hbase/util/BloomFilter.java | 4 +-
.../apache/hadoop/hbase/util/BloomFilterChunk.java | 2 +-
.../hadoop/hbase/util/BloomFilterFactory.java | 18 +--
.../apache/hadoop/hbase/util/BloomFilterUtil.java | 27 +++--
.../hadoop/hbase/util/DirectMemoryUtils.java | 4 +-
.../apache/hadoop/hbase/util/EncryptionTest.java | 3 +-
.../java/org/apache/hadoop/hbase/util/FSUtils.java | 55 ++++-----
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 20 ++-
.../apache/hadoop/hbase/util/HBaseFsckRepair.java | 2 +-
.../apache/hadoop/hbase/util/JVMClusterUtil.java | 11 +-
.../hadoop/hbase/util/ModifyRegionUtils.java | 8 +-
.../hadoop/hbase/util/MunkresAssignment.java | 2 +-
.../org/apache/hadoop/hbase/util/RegionMover.java | 6 +-
.../apache/hadoop/hbase/util/RegionSplitter.java | 28 ++---
.../hadoop/hbase/util/RollingStatCalculator.java | 10 +-
.../apache/hadoop/hbase/util/ZKDataMigrator.java | 3 +-
.../hbase/util/hbck/HFileCorruptionChecker.java | 22 ++--
.../main/java/org/apache/hadoop/hbase/wal/WAL.java | 8 +-
.../org/apache/hadoop/hbase/wal/WALKeyImpl.java | 8 +-
.../apache/hadoop/hbase/wal/WALPrettyPrinter.java | 26 ++--
.../org/apache/hadoop/hbase/HBaseTestingUtil.java | 76 +++++++-----
.../hadoop/hbase/HFilePerformanceEvaluation.java | 16 +--
.../org/apache/hadoop/hbase/MetaMockingUtil.java | 6 +-
.../hadoop/hbase/TestGlobalMemStoreSize.java | 4 +-
.../hbase/TestPartialResultsFromClientSide.java | 26 ++--
.../apache/hadoop/hbase/TestRegionRebalancing.java | 2 +-
.../org/apache/hadoop/hbase/TestSerialization.java | 2 +-
.../org/apache/hadoop/hbase/TimestampTestBase.java | 11 +-
.../hadoop/hbase/client/FromClientSideBase.java | 8 +-
.../org/apache/hadoop/hbase/client/TestAdmin2.java | 2 +-
.../hadoop/hbase/client/TestFromClientSide3.java | 2 +-
.../client/TestFromClientSideScanExcpetion.java | 2 +-
.../apache/hadoop/hbase/client/TestMetaCache.java | 2 +-
.../org/apache/hadoop/hbase/client/TestResult.java | 3 +-
.../hadoop/hbase/client/TestScannerTimeout.java | 6 +-
.../hadoop/hbase/client/TestSizeFailures.java | 2 +-
.../hbase/client/TestSnapshotFromClient.java | 3 +-
.../hadoop/hbase/client/TestSnapshotMetadata.java | 2 +-
.../hadoop/hbase/client/TestTimestampsFilter.java | 4 +-
.../hbase/client/locking/TestEntityLocks.java | 2 +-
.../coprocessor/TestCoreMasterCoprocessor.java | 2 +-
.../coprocessor/TestCoreRegionCoprocessor.java | 2 +-
.../TestCoreRegionServerCoprocessor.java | 2 +-
.../coprocessor/TestOpenTableInCoprocessor.java | 2 +-
.../coprocessor/TestRegionObserverBypass.java | 4 +-
.../coprocessor/TestRegionObserverInterface.java | 2 +-
.../hbase/filter/TestColumnPaginationFilter.java | 4 +-
.../hbase/filter/TestDependentColumnFilter.java | 4 +-
.../org/apache/hadoop/hbase/filter/TestFilter.java | 8 +-
.../apache/hadoop/hbase/filter/TestFilterList.java | 14 +--
.../filter/TestFilterListOrOperatorWithBlkCnt.java | 6 +-
.../hbase/filter/TestInclusiveStopFilter.java | 4 +-
.../hbase/filter/TestMultiRowRangeFilter.java | 6 +-
.../apache/hadoop/hbase/filter/TestPageFilter.java | 4 +-
.../hadoop/hbase/filter/TestRandomRowFilter.java | 4 +-
.../filter/TestSingleColumnValueExcludeFilter.java | 2 +-
.../hbase/filter/TestSingleColumnValueFilter.java | 4 +-
.../hadoop/hbase/io/TestHalfStoreFileReader.java | 2 +-
.../org/apache/hadoop/hbase/io/TestHeapSize.java | 2 +-
.../hbase/io/encoding/TestDataBlockEncoders.java | 6 +-
.../apache/hadoop/hbase/io/hfile/NanoTimer.java | 6 +-
.../hadoop/hbase/io/hfile/RandomKeyValueUtil.java | 3 +-
.../apache/hadoop/hbase/io/hfile/TestHFile.java | 4 +-
.../hadoop/hbase/io/hfile/TestHFileBlockIndex.java | 3 +-
.../hbase/io/hfile/TestHFileDataBlockEncoder.java | 4 +-
.../io/hfile/bucket/TestBucketCacheRefCnt.java | 2 -
.../io/hfile/bucket/TestBucketWriterThread.java | 8 +-
.../hadoop/hbase/master/MockRegionServer.java | 5 +-
.../hbase/master/TestActiveMasterManager.java | 2 +-
.../TestMasterFailoverBalancerPersistence.java | 6 +-
.../hadoop/hbase/master/TestMasterTransitions.java | 10 +-
.../hadoop/hbase/master/TestRegionPlacement.java | 10 +-
.../balancer/TestStochasticBalancerJmxMetrics.java | 2 +-
.../hbase/master/janitor/TestCatalogJanitor.java | 2 +-
.../janitor/TestCatalogJanitorInMemoryStates.java | 4 +-
.../procedure/MasterProcedureTestingUtility.java | 2 +-
.../TestTableDescriptorModificationFromClient.java | 2 +-
.../org/apache/hadoop/hbase/mob/MobTestUtil.java | 2 +-
.../hbase/procedure/SimpleRSProcedureManager.java | 2 +-
.../procedure/TestZKProcedureControllers.java | 4 +-
.../hbase/protobuf/TestReplicationProtobuf.java | 2 +-
.../hbase/regionserver/CreateRandomStoreFile.java | 2 +-
.../hbase/regionserver/DataBlockEncodingTool.java | 4 +-
.../hbase/regionserver/TestCompactingMemStore.java | 8 +-
.../hadoop/hbase/regionserver/TestCompaction.java | 2 +-
.../hbase/regionserver/TestCompactionState.java | 5 +-
.../TestDateTieredCompactionPolicy.java | 2 +-
.../hbase/regionserver/TestDefaultMemStore.java | 18 +--
.../hbase/regionserver/TestDeleteMobTable.java | 2 +-
.../hadoop/hbase/regionserver/TestHMobStore.java | 12 +-
.../hadoop/hbase/regionserver/TestHRegion.java | 29 +++--
.../regionserver/TestHRegionReplayEvents.java | 4 +-
.../hadoop/hbase/regionserver/TestHStoreFile.java | 2 +-
.../hbase/regionserver/TestJoinedScanners.java | 3 +-
.../hbase/regionserver/TestMajorCompaction.java | 4 +-
.../regionserver/TestRSKilledWhenInitializing.java | 2 +-
.../regionserver/TestRegionReplicaFailover.java | 2 +-
.../TestRegionServerOnlineConfigChange.java | 2 +-
.../regionserver/TestRequestsPerSecondMetric.java | 2 +-
.../hadoop/hbase/regionserver/TestRowTooBig.java | 2 +-
.../hadoop/hbase/regionserver/TestScanner.java | 2 +-
.../TestSplitTransactionOnCluster.java | 7 +-
.../hbase/regionserver/TestStoreScanner.java | 4 +-
.../querymatcher/TestUserScanQueryMatcher.java | 4 +-
.../hbase/regionserver/wal/AbstractTestFSWAL.java | 8 +-
.../regionserver/wal/AbstractTestProtobufLog.java | 4 +-
.../regionserver/wal/AbstractTestWALReplay.java | 3 +-
.../hbase/regionserver/wal/TestLogRolling.java | 2 +-
.../regionserver/wal/TestLogRollingNoCluster.java | 2 +-
.../TestReplicationDisableInactivePeer.java | 2 +-
.../regionserver/TestReplicationSink.java | 10 +-
.../regionserver/TestWALEntrySinkFilter.java | 2 +-
.../hbase/security/AbstractTestSecureIPC.java | 2 +-
.../TestUsersOperationsWithSecureHadoop.java | 2 +-
.../hbase/snapshot/SnapshotTestingUtils.java | 2 +-
.../tool/TestBulkLoadHFilesSplitRecovery.java | 3 +-
.../hadoop/hbase/util/BaseTestHBaseFsck.java | 11 +-
.../hadoop/hbase/util/HFileArchiveTestingUtil.java | 4 +-
.../hbase/util/ProcessBasedLocalHBaseCluster.java | 2 +-
.../org/apache/hadoop/hbase/util/TestFSUtils.java | 2 +-
.../hadoop/hbase/util/hbck/HbckTestingUtil.java | 2 +-
.../hbase/util/test/LoadTestDataGenerator.java | 10 +-
.../apache/hadoop/hbase/wal/IOTestProvider.java | 2 +-
.../hadoop/hbase/wal/TestFSHLogProvider.java | 7 +-
.../apache/hadoop/hbase/wal/TestWALFactory.java | 6 +-
.../apache/hadoop/hbase/wal/TestWALMethods.java | 2 +-
.../hbase/wal/TestWALOpenAfterDNRollingStart.java | 2 +-
.../org/apache/hadoop/hbase/wal/TestWALSplit.java | 4 +-
.../hadoop/hbase/wal/WALPerformanceEvaluation.java | 5 +-
.../java/org/apache/hadoop/hbase/HBaseCluster.java | 2 +-
.../apache/hadoop/hbase/HBaseTestingUtility.java | 121 ++++++++++---------
.../org/apache/hadoop/hbase/MiniHBaseCluster.java | 28 +++--
.../hadoop/hbase/thrift/HBaseServiceHandler.java | 2 +-
.../hadoop/hbase/thrift/ThriftUtilities.java | 20 +--
.../TestThriftHBaseServiceHandlerWithLabels.java | 5 +-
.../hadoop/hbase/zookeeper/MetaTableLocator.java | 4 +-
.../hbase/zookeeper/RecoverableZooKeeper.java | 5 +-
.../org/apache/hadoop/hbase/zookeeper/ZKUtil.java | 30 +++--
497 files changed, 2090 insertions(+), 2099 deletions(-)
diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
index 4ac46e8cc5d..00b6631379b 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
@@ -368,7 +368,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
* Create a ByteString from byte array without copying (wrap), and then set it as the payload
* for the builder.
* @param builder builder for HDFS DataTransferEncryptorMessage.
- * @param payload byte array of payload. n
+ * @param payload byte array of payload.
*/
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
byte[] payload) throws IOException {
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index a543b577b7a..ed1755ad502 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -165,7 +165,7 @@ public class BackupManager implements Closeable {
}
/**
- * Get configuration n
+ * Get configuration
*/
Configuration getConf() {
return conf;
@@ -192,7 +192,8 @@ public class BackupManager implements Closeable {
* @param tableList table list
* @param targetRootDir root dir
* @param workers number of parallel workers
- * @param bandwidth bandwidth per worker in MB per sec n * @throws BackupException exception
+ * @param bandwidth bandwidth per worker in MB per sec
+ * @throws BackupException exception
*/
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
String targetRootDir, int workers, long bandwidth) throws BackupException {
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 68b08544196..6c021bf622a 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -184,9 +184,7 @@ public class FavoredNodeAssignmentHelper {
return servers;
}
- /**
- * n * @return PB'ed bytes of {@link FavoredNodes} generated by the server list.
- */
+ /** Returns PB'ed bytes of {@link FavoredNodes} generated by the server list. */
public static byte[] getFavoredNodes(List<ServerName> serverAddrList) {
FavoredNodes.Builder f = FavoredNodes.newBuilder();
for (ServerName s : serverAddrList) {
@@ -319,8 +317,8 @@ public class FavoredNodeAssignmentHelper {
/**
* For regions that share the primary, avoid placing the secondary and tertiary on a same RS. Used
- * for generating new assignments for the primary/secondary/tertiary RegionServers n * @return the
- * map of regions to the servers the region-files should be hosted on
+ * for generating new assignments for the primary/secondary/tertiary RegionServers
+ * @return the map of regions to the servers the region-files should be hosted on
*/
public Map<RegionInfo, ServerName[]>
placeSecondaryAndTertiaryWithRestrictions(Map<RegionInfo, ServerName> primaryRSMap) {
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
index 4c6f2b3cc27..3b3aedad6c6 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
@@ -80,7 +80,7 @@ public class FavoredNodesPlan {
/**
* Return the position of the server in the favoredNodes list. Assumes the favoredNodes list is of
- * size 3. n
+ * size 3.
*/
public static Position getFavoredServerPosition(List<ServerName> favoredNodes,
ServerName server) {
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
index 8858e13da70..d86201a34fb 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
@@ -294,7 +294,7 @@ public class AssignmentVerificationReport {
}
/**
- * Use this to project the dispersion scores nnn
+ * Use this to project the dispersion scores
*/
public void fillUpDispersion(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot,
FavoredNodesPlan newPlan) {
@@ -566,7 +566,8 @@ public class AssignmentVerificationReport {
/**
* Return the number of regions based on the position (primary/secondary/ tertiary) assigned to
- * their favored nodes n * @return the number of regions
+ * their favored nodes
+ * @return the number of regions
*/
int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) {
return favoredNodes[position.ordinal()];
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
index 8c675c4522e..67438677dad 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
@@ -53,7 +53,8 @@ public class ClusterId {
/**
* Parse the serialized representation of the {@link ClusterId}
* @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
- * @return An instance of {@link ClusterId} made from <code>bytes</code> n * @see #toByteArray()
+ * @return An instance of {@link ClusterId} made from <code>bytes</code>
+ * @see #toByteArray()
*/
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
@@ -79,9 +80,7 @@ public class ClusterId {
return builder.setClusterId(this.id).build();
}
- /**
- * n * @return A {@link ClusterId} made from the passed in <code>cid</code>
- */
+ /** Returns A {@link ClusterId} made from the passed in <code>cid</code> */
public static ClusterId convert(final ClusterIdProtos.ClusterId cid) {
return new ClusterId(cid.getClusterId());
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index ebf6d919374..4d554fa19bf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -80,9 +80,7 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
return this.serverName.hashCode();
}
- /**
- * n
- */
+ /** Returns regionInfo */
public RegionInfo getRegion() {
return regionInfo;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
index a15833ac17a..bc156353a1b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
@@ -34,8 +34,7 @@ public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException {
}
/**
- * n
- */
+ * */
public NotAllMetaRegionsOnlineException(String message) {
super(message);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index e0c408781f8..2684886ba3d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -76,7 +76,7 @@ public interface ServerMetrics {
Map<String, List<ReplicationLoadSource>> getReplicationLoadSourceMap();
/**
- * Call directly from client such as hbase shell n
+ * Call directly from client such as hbase shell
*/
@Nullable
ReplicationLoadSink getReplicationLoadSink();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 597dfcd266a..f5da0aa0bde 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -373,9 +373,10 @@ public interface Admin extends Abortable, Closeable {
* Disable table and wait on completion. May timeout eventually. Use
* {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
* {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
- * enabled state for it to be disabled. n * @throws IOException There could be couple types of
- * IOException TableNotFoundException means the table doesn't exist. TableNotEnabledException
- * means the table isn't in enabled state.
+ * enabled state for it to be disabled.
+ * @throws IOException There could be couple types of IOException TableNotFoundException means the
+ * table doesn't exist. TableNotEnabledException means the table isn't in
+ * enabled state.
*/
default void disableTable(TableName tableName) throws IOException {
get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
@@ -598,7 +599,7 @@ public interface Admin extends Abortable, Closeable {
* then it returns. It does not wait on the completion of Compaction (it can take a while).
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
- * @throws IOException if a remote or network exception occurs n
+ * @throws IOException if a remote or network exception occurs
*/
void compact(TableName tableName, CompactType compactType)
throws IOException, InterruptedException;
@@ -610,7 +611,7 @@ public interface Admin extends Abortable, Closeable {
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
- * @throws IOException if not a mob column family or if a remote or network exception occurs n
+ * @throws IOException if not a mob column family or if a remote or network exception occurs
*/
void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
throws IOException, InterruptedException;
@@ -659,7 +660,7 @@ public interface Admin extends Abortable, Closeable {
* while).
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
- * @throws IOException if a remote or network exception occurs n
+ * @throws IOException if a remote or network exception occurs
*/
void majorCompact(TableName tableName, CompactType compactType)
throws IOException, InterruptedException;
@@ -671,7 +672,7 @@ public interface Admin extends Abortable, Closeable {
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
- * @throws IOException if not a mob column family or if a remote or network exception occurs n
+ * @throws IOException if not a mob column family or if a remote or network exception occurs
*/
void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
throws IOException, InterruptedException;
@@ -1880,7 +1881,7 @@ public interface Admin extends Abortable, Closeable {
/**
* Return the set of supported security capabilities.
- * @throws IOException if a remote or network exception occurs n
+ * @throws IOException if a remote or network exception occurs
*/
List<SecurityCapability> getSecurityCapabilities() throws IOException;
@@ -2215,7 +2216,7 @@ public interface Admin extends Abortable, Closeable {
* Clear compacting queues on a regionserver.
* @param serverName the region server name
* @param queues the set of queue name
- * @throws IOException if a remote or network exception occurs n
+ * @throws IOException if a remote or network exception occurs
*/
void clearCompactionQueues(ServerName serverName, Set<String> queues)
throws IOException, InterruptedException;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 6304065ae37..81cf86ed207 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -63,7 +63,7 @@ public class Append extends Mutation {
* <p>
* This range is used as [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
- * @param maxStamp maximum timestamp value, exclusive n
+ * @param maxStamp maximum timestamp value, exclusive
*/
public Append setTimeRange(long minStamp, long maxStamp) {
tr = TimeRange.between(minStamp, maxStamp);
@@ -71,7 +71,7 @@ public class Append extends Mutation {
}
/**
- * Gets the TimeRange used for this append. n
+ * Gets the TimeRange used for this append.
*/
public TimeRange getTimeRange() {
return this.tr;
@@ -83,7 +83,7 @@ public class Append extends Mutation {
}
/**
- * n * True (default) if the append operation should return the results. A client that is not
+ * True (default) if the append operation should return the results. A client that is not
* interested in the result can save network bandwidth setting this to false.
*/
@Override
@@ -122,7 +122,7 @@ public class Append extends Mutation {
* Create a Append operation for the specified row.
* <p>
* At least one column must be appended to.
- * @param rowArray Makes a copy out of this buffer. nn
+ * @param rowArray Makes a copy out of this buffer.
*/
public Append(final byte[] rowArray, final int rowOffset, final int rowLength) {
checkRow(rowArray, rowOffset, rowLength);
@@ -144,7 +144,7 @@ public class Append extends Mutation {
* Add the specified column and value to this Append operation.
* @param family family name
* @param qualifier column qualifier
- * @param value value to append to specified column n
+ * @param value value to append to specified column
*/
public Append addColumn(byte[] family, byte[] qualifier, byte[] value) {
KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 19a0490e361..6070c553f5e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -211,7 +211,7 @@ public interface AsyncAdmin {
CompletableFuture<Void> enableTable(TableName tableName);
/**
- * Disable a table. The table has to be in enabled state for it to be disabled. n
+ * Disable a table. The table has to be in enabled state for it to be disabled.
*/
CompletableFuture<Void> disableTable(TableName tableName);
@@ -1156,7 +1156,7 @@ public interface AsyncAdmin {
CompletableFuture<Void> stopMaster();
/**
- * Stop the designated regionserver. n
+ * Stop the designated regionserver.
*/
CompletableFuture<Void> stopRegionServer(ServerName serverName);
@@ -1365,8 +1365,8 @@ public interface AsyncAdmin {
CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp);
/**
- * Turn the cleaner chore on/off. n * @return Previous cleaner state wrapped by a
- * {@link CompletableFuture}
+ * Turn the cleaner chore on/off.
+ * @return Previous cleaner state wrapped by a {@link CompletableFuture}
*/
CompletableFuture<Boolean> cleanerChoreSwitch(boolean on);
@@ -1385,8 +1385,8 @@ public interface AsyncAdmin {
CompletableFuture<Boolean> runCleanerChore();
/**
- * Turn the catalog janitor on/off. n * @return the previous state wrapped by a
- * {@link CompletableFuture}
+ * Turn the catalog janitor on/off.
+ * @return the previous state wrapped by a {@link CompletableFuture}
*/
CompletableFuture<Boolean> catalogJanitorSwitch(boolean on);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 6198086d503..1f29b556b12 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -185,7 +185,7 @@ public class AsyncConnectionImpl implements AsyncConnection {
}
/**
- * If choreService has not been created yet, create the ChoreService. n
+ * If choreService has not been created yet, create the ChoreService.
*/
synchronized ChoreService getChoreService() {
if (isClosed()) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
index 6a092a221fd..369b2be8ecd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
@@ -133,7 +133,7 @@ public interface ColumnFamilyDescriptor {
int getMinVersions();
/**
- * Get the mob compact partition policy for this family n
+ * Get the mob compact partition policy for this family
*/
MobCompactPartitionPolicy getMobCompactPartitionPolicy();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 3c11bef53c7..42f25fdc56f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -861,8 +861,8 @@ public class ColumnFamilyDescriptorBuilder {
/**
* Set whether the tags should be compressed along with DataBlockEncoding. When no
- * DataBlockEncoding is been used, this is having no effect. n * @return this (for chained
- * invocation)
+ * DataBlockEncoding is been used, this is having no effect.
+ * @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) {
return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 8ec670d445f..f97db8a116d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -97,7 +97,7 @@ public class Delete extends Mutation {
* <p>
* This timestamp is ONLY used for a delete row operation. If specifying families or columns, you
* must specify each timestamp individually.
- * @param row We make a local copy of this passed in row. nn
+ * @param row We make a local copy of this passed in row.
*/
public Delete(final byte[] row, final int rowOffset, final int rowLength) {
this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@@ -141,7 +141,7 @@ public class Delete extends Mutation {
/**
* Add an existing delete marker to this Delete object.
* @param cell An existing cell of type "delete".
- * @return this for invocation chaining n
+ * @return this for invocation chaining
*/
@Override
public Delete add(Cell cell) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index f4e06101255..617f67b9a87 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -86,7 +86,7 @@ public class Get extends Query implements Row {
}
/**
- * Copy-constructor n
+ * Copy-constructor
*/
public Get(Get get) {
this(get.getRow());
@@ -125,7 +125,7 @@ public class Get extends Query implements Row {
}
/**
- * Create a Get operation for the specified row. nnn
+ * Create a Get operation for the specified row.
*/
public Get(byte[] row, int rowOffset, int rowLength) {
Mutation.checkRow(row, rowOffset, rowLength);
@@ -133,7 +133,7 @@ public class Get extends Query implements Row {
}
/**
- * Create a Get operation for the specified row. n
+ * Create a Get operation for the specified row.
*/
public Get(ByteBuffer row) {
Mutation.checkRow(row);
@@ -294,7 +294,7 @@ public class Get extends Query implements Row {
}
/**
- * Method for retrieving the get's row n
+ * Method for retrieving the get's row
*/
@Override
public byte[] getRow() {
@@ -326,7 +326,7 @@ public class Get extends Query implements Row {
}
/**
- * Method for retrieving the get's TimeRange n
+ * Method for retrieving the get's TimeRange
*/
public TimeRange getTimeRange() {
return this.tr;
@@ -357,7 +357,7 @@ public class Get extends Query implements Row {
}
/**
- * Method for retrieving the get's familyMap n
+ * Method for retrieving the get's familyMap
*/
public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
@@ -365,7 +365,7 @@ public class Get extends Query implements Row {
/**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
- * and aggregation by debugging, logging, and administration tools. n
+ * and aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
@@ -382,7 +382,7 @@ public class Get extends Query implements Row {
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
- * @param maxCols a limit on the number of columns output prior to truncation n
+ * @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
index e4b177e3bca..aad853f8c06 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
@@ -94,7 +94,8 @@ public class Increment extends Mutation {
/**
* Add the specified KeyValue to this operation.
- * @param cell individual Cell n * @throws java.io.IOException e
+ * @param cell individual Cell
+ * @throws java.io.IOException e
*/
@Override
public Increment add(Cell cell) throws IOException {
@@ -123,7 +124,7 @@ public class Increment extends Mutation {
}
/**
- * Gets the TimeRange used for this increment. n
+ * Gets the TimeRange used for this increment.
*/
public TimeRange getTimeRange() {
return this.tr;
@@ -141,7 +142,7 @@ public class Increment extends Mutation {
* This range is used as [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
- * @throws IOException if invalid time range n
+ * @throws IOException if invalid time range
*/
public Increment setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = TimeRange.between(minStamp, maxStamp);
@@ -211,8 +212,7 @@ public class Increment extends Mutation {
}
/**
- * n
- */
+ * */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index fbb76ea4f65..a9382f3a9be 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -179,7 +179,7 @@ class MutableRegionInfo implements RegionInfo {
}
/**
- * Get current table name of the region n
+ * Get current table name of the region
*/
@Override
public TableName getTable() {
@@ -231,7 +231,7 @@ class MutableRegionInfo implements RegionInfo {
/**
* Change the split status flag.
- * @param split set split status n
+ * @param split set split status
*/
public MutableRegionInfo setSplit(boolean split) {
this.split = split;
@@ -252,7 +252,7 @@ class MutableRegionInfo implements RegionInfo {
/**
* The parent of a region split is offline while split daughters hold references to the parent.
* Offlined regions are closed.
- * @param offLine Set online/offline status. n
+ * @param offLine Set online/offline status.
*/
public MutableRegionInfo setOffline(boolean offLine) {
this.offLine = offLine;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index df9e92f74dc..0be0325d499 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -205,7 +205,7 @@ public abstract class Mutation extends OperationWithAttributes
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
- * @param maxCols a limit on the number of columns output prior to truncation n
+ * @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
@@ -268,7 +268,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * Set the durability for this mutation n
+ * Set the durability for this mutation
*/
public Mutation setDurability(Durability d) {
this.durability = d;
@@ -281,7 +281,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * Method for retrieving the put's familyMap n
+ * Method for retrieving the put's familyMap
*/
public NavigableMap<byte[], List<Cell>> getFamilyCellMap() {
return this.familyMap;
@@ -296,7 +296,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * Method for retrieving the delete's row n
+ * Method for retrieving the delete's row
*/
@Override
public byte[] getRow() {
@@ -304,7 +304,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * Method for retrieving the timestamp. n
+ * Method for retrieving the timestamp.
*/
public long getTimestamp() {
return this.ts;
@@ -340,7 +340,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * Sets the visibility expression associated with cells in this Mutation. n
+ * Sets the visibility expression associated with cells in this Mutation.
*/
public Mutation setCellVisibility(CellVisibility expression) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@@ -356,8 +356,8 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
- * protocol buffer CellVisibility
+ * Create a protocol buffer CellVisibility based on a client CellVisibility.
+ * @return a protocol buffer CellVisibility
*/
static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@@ -366,8 +366,8 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
- * client CellVisibility
+ * Convert a protocol buffer CellVisibility to a client CellVisibility
+ * @return the converted client CellVisibility
*/
private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
if (proto == null) return null;
@@ -375,8 +375,8 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
- * converted client CellVisibility n
+ * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+ * @return the converted client CellVisibility
*/
private static CellVisibility toCellVisibility(byte[] protoBytes)
throws DeserializationException {
@@ -483,7 +483,7 @@ public abstract class Mutation extends OperationWithAttributes
/**
* Set the TTL desired for the result of the mutation, in milliseconds.
- * @param ttl the TTL desired for the result of the mutation, in milliseconds n
+ * @param ttl the TTL desired for the result of the mutation, in milliseconds
*/
public Mutation setTTL(long ttl) {
setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl));
@@ -660,8 +660,9 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
- * @param row Row to check nn * @throws IllegalArgumentException Thrown if <code>row</code> is
- * empty or null or > {@link HConstants#MAX_ROW_LENGTH}
+ * @param row Row to check
+ * @throws IllegalArgumentException Thrown if <code>row</code> is empty or null or >
+ * {@link HConstants#MAX_ROW_LENGTH}
* @return <code>row</code>
*/
static byte[] checkRow(final byte[] row, final int offset, final int length) {
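For readers skimming the cleaned-up Mutation javadoc, a minimal sketch of how these setters are typically used (row, family, value, and visibility label are made-up example values, not from this patch):

    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.security.visibility.CellVisibility;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MutationSketch {
      public static void main(String[] args) {
        // Put is a concrete Mutation; exercise the setters documented above.
        Put put = new Put(Bytes.toBytes("row-1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        put.setDurability(Durability.SYNC_WAL);               // durability for this mutation
        put.setTTL(60_000L);                                  // TTL of the result, in milliseconds
        put.setCellVisibility(new CellVisibility("public"));  // visibility expression for the cells
        System.out.println(put.getTimestamp());               // LATEST_TIMESTAMP until set explicitly
      }
    }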
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
index a517f0bb43a..2cad5ef7325 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
@@ -94,7 +94,7 @@ public abstract class Operation {
/**
* Produces a string representation of this Operation. It defaults to a JSON representation, but
* falls back to a string representation of the fingerprint and details in the case of a JSON
- * encoding failure. n
+ * encoding failure.
*/
@Override
public String toString() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
index e34c9d6eacb..33c1d853e1a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
@@ -106,7 +106,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri
* This method allows you to set an identifier on an operation. The original motivation for this
* was to allow the identifier to be used in slow query logging, but this could obviously be
* useful in other places. One use of this could be to put a class.method identifier in here to
- * see where the slow query is coming from. n * id to set for the scan
+ * see where the slow query is coming from.
+ * @param id id to set for the scan
*/
public OperationWithAttributes setId(String id) {
setAttribute(ID_ATRIBUTE, Bytes.toBytes(id));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index 5e821f07546..dc470069f90 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -61,7 +61,7 @@ public class Put extends Mutation implements HeapSize {
}
/**
- * We make a copy of the passed in row key to keep local. nnn
+ * We make a copy of the passed in row key to keep local.
*/
public Put(byte[] rowArray, int rowOffset, int rowLength) {
this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@@ -89,7 +89,7 @@ public class Put extends Mutation implements HeapSize {
}
/**
- * We make a copy of the passed in row key to keep local. nnnn
+ * We make a copy of the passed in row key to keep local.
*/
public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) {
checkRow(rowArray, rowOffset, rowLength);
@@ -156,7 +156,7 @@ public class Put extends Mutation implements HeapSize {
* Add the specified column and value to this Put operation.
* @param family family name
* @param qualifier column qualifier
- * @param value column value n
+ * @param value column value
*/
public Put addColumn(byte[] family, byte[] qualifier, byte[] value) {
return addColumn(family, qualifier, this.ts, value);
@@ -168,7 +168,7 @@ public class Put extends Mutation implements HeapSize {
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
- * @param value column value n
+ * @param value column value
*/
public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) {
if (ts < 0) {
@@ -186,7 +186,7 @@ public class Put extends Mutation implements HeapSize {
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
- * @param value column value n
+ * @param value column value
*/
public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
if (ts < 0) {
@@ -201,7 +201,8 @@ public class Put extends Mutation implements HeapSize {
/**
* Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is
* immutable and its backing array will not be modified for the duration of this Put.
- * @param cell individual cell n * @throws java.io.IOException e
+ * @param cell individual cell
+ * @throws java.io.IOException e
*/
@Override
public Put add(Cell cell) throws IOException {
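A short example of the Put#addColumn overloads documented above (names and timestamps are illustrative):

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("row-1");
        Put put = new Put(row, 0, row.length); // Put keeps its own copy of the row key
        // Without a timestamp: uses the Put's own ts (LATEST_TIMESTAMP by default).
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
        // With an explicit version timestamp (must be >= 0).
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 1000L, Bytes.toBytes("v2"));
      }
    }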
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index cf892ae0d74..944a7037682 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -47,9 +47,6 @@ public abstract class Query extends OperationWithAttributes {
protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
protected Boolean loadColumnFamiliesOnDemand = null;
- /**
- * n
- */
public Filter getFilter() {
return filter;
}
@@ -67,7 +64,7 @@ public abstract class Query extends OperationWithAttributes {
}
/**
- * Sets the authorizations to be used by this Query n
+ * Sets the authorizations to be used by this Query
*/
public Query setAuthorizations(Authorizations authorizations) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@@ -133,7 +130,7 @@ public abstract class Query extends OperationWithAttributes {
* Specify region replica id where Query will fetch data from. Use this together with
* {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a
* specific replicaId. <br>
- * <b> Expert: </b>This is an advanced API exposed. Only use it if you know what you are doing n
+ * <b>Expert:</b> this is an advanced API. Only use it if you know what you are doing.
*/
public Query setReplicaId(int Id) {
this.targetReplicaId = Id;
@@ -209,7 +206,7 @@ public abstract class Query extends OperationWithAttributes {
* Column Family time ranges take precedence over the global time range.
* @param cf the column family for which you want to restrict
* @param minStamp minimum timestamp value, inclusive
- * @param maxStamp maximum timestamp value, exclusive n
+ * @param maxStamp maximum timestamp value, exclusive
*/
public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
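To make the per-family time range concrete, a sketch using Scan (a Query subclass); the values are arbitrary:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class QuerySketch {
      public static void main(String[] args) throws Exception {
        Scan scan = new Scan();
        scan.setTimeRange(0L, 2000L); // global range, [min, max)
        // Per-family override: for "cf", only cells in [500, 1500) are returned.
        scan.setColumnFamilyTimeRange(Bytes.toBytes("cf"), 500L, 1500L);
      }
    }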
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
index 58163a2d74a..3f353b5799d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
@@ -59,7 +59,8 @@ public class RegionInfoDisplay {
}
/**
- * Get the start key for display. Optionally hide the real start key. nn * @return the startkey
+ * Get the start key for display. Optionally hide the real start key.
+ * @return the startkey
*/
public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
@@ -68,15 +69,16 @@ public class RegionInfoDisplay {
}
/**
- * Get the region name for display. Optionally hide the start key. nn * @return region name as
- * String
+ * Get the region name for display. Optionally hide the start key.
+ * @return region name as String
*/
public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuration conf) {
return Bytes.toStringBinary(getRegionNameForDisplay(ri, conf));
}
/**
- * Get the region name for display. Optionally hide the start key. nn * @return region name bytes
+ * Get the region name for display. Optionally hide the start key.
+ * @return region name bytes
*/
public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
index 1d6708b49d1..df1bfd61ca6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
@@ -84,7 +84,7 @@ public class RegionReplicaUtil {
}
/**
- * Removes the non-default replicas from the passed regions collection n
+ * Removes the non-default replicas from the passed regions collection
*/
public static void removeNonDefaultRegions(Collection<RegionInfo> regions) {
Iterator<RegionInfo> iterator = regions.iterator();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index a1ab6075fe7..86cdaaeef3c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -186,7 +186,7 @@ public class Result implements CellScannable, CellScanner {
/**
* Method for retrieving the row key that corresponds to the row from which this Result was
- * created. n
+ * created.
*/
public byte[] getRow() {
if (this.row == null) {
@@ -227,8 +227,9 @@ public class Result implements CellScannable, CellScanner {
* or Get) only requested 1 version the list will contain at most 1 entry. If the column did not
* exist in the result set (either the column does not exist or the column was not selected in the
* query) the list will be empty. Also see getColumnLatest which returns just a Cell
- * @param family the family n * @return a list of Cells for this column or empty list if the
- * column did not exist in the result set
+ * @param family the family
+ * @param qualifier the qualifier
+ * @return a list of Cells for this column or empty list if the column did not exist in the result
+ * set
*/
public List<Cell> getColumnCells(byte[] family, byte[] qualifier) {
List<Cell> result = new ArrayList<>();
@@ -324,7 +325,7 @@ public class Result implements CellScannable, CellScanner {
}
/**
- * The Cell for the most recent timestamp for a given column. nn *
+ * The Cell for the most recent timestamp for a given column.
* @return the Cell for the column, or null if no value exists in the row or none have been
* selected in the query (Get/Scan)
*/
@@ -677,8 +678,7 @@ public class Result implements CellScannable, CellScanner {
}
/**
- * n
- */
+ */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
@@ -800,7 +800,8 @@ public class Result implements CellScannable, CellScanner {
}
/**
- * Get total size of raw cells n * @return Total size.
+ * Get total size of raw cells
+ * @return Total size.
*/
public static long getTotalSizeOfCells(Result result) {
long size = 0;
@@ -816,7 +817,7 @@ public class Result implements CellScannable, CellScanner {
/**
* Copy another Result into this one. Needed for the old Mapred framework
* @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT (which is supposed
- * to be immutable). n
+ * to be immutable).
*/
public void copyFrom(Result other) {
checkReadonly();
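A small sketch of reading a Result with the accessors documented above, assuming a Result obtained from a Get or Scan (family and qualifier names are illustrative):

    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ResultSketch {
      static void inspect(Result result) {
        byte[] row = result.getRow(); // row key this Result was created from
        // All returned versions of one column; empty list if the column is absent.
        List<Cell> cells = result.getColumnCells(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        long bytes = Result.getTotalSizeOfCells(result); // total size of raw cells
        System.out.println(Bytes.toStringBinary(row) + ": " + cells.size() + " cells, " + bytes + "B");
      }
    }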
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
index 74ff6de6f93..ebb27ceff75 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
@@ -86,7 +86,7 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
* setting (or hbase.client.scanner.caching in hbase-site.xml).
* @param nbRows number of rows to return
* @return Between zero and nbRows rowResults. Scan is done if returned array is of zero-length
- * (We never return null). n
+ * (We never return null).
*/
default Result[] next(int nbRows) throws IOException {
List<Result> resultSets = new ArrayList<>(nbRows);
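The batched next(int) contract above (a zero-length array, never null, marks the end of the scan) leads to loops like this sketch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;

    public class ScannerSketch {
      static void drain(ResultScanner scanner) throws IOException {
        for (Result[] batch = scanner.next(100); batch.length > 0; batch = scanner.next(100)) {
          for (Result r : batch) {
            // process r here
          }
        }
      }
    }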
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
index be44c26190b..0694d9e39e2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
@@ -38,8 +38,8 @@ public class RowMutations implements Row {
/**
* Create a {@link RowMutations} with the specified mutations.
- * @param mutations the mutations to send n * @throws IOException if any row in mutations is
- * different to another
+ * @param mutations the mutations to send
+ * @throws IOException if any row in mutations is different to another
*/
public static RowMutations of(List<? extends Mutation> mutations) throws IOException {
if (CollectionUtils.isEmpty(mutations)) {
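Sketch of RowMutations.of as documented above; all mutations must share one row or an IOException is thrown (names are illustrative):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowMutationsSketch {
      public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row-1");
        Put put = new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        Delete del = new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("old"));
        RowMutations rm = RowMutations.of(Arrays.asList(put, del)); // atomic per-row batch
      }
    }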
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index bdca990ca54..b9adefb40cd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -259,7 +259,7 @@ public class Scan extends Query {
* Get all columns from the specified family.
* <p>
* Overrides previous calls to addColumn for this family.
- * @param family family name n
+ * @param family family name
*/
public Scan addFamily(byte[] family) {
familyMap.remove(family);
@@ -272,7 +272,7 @@ public class Scan extends Query {
* <p>
* Overrides previous calls to addFamily for this family.
* @param family family name
- * @param qualifier column qualifier n
+ * @param qualifier column qualifier
*/
public Scan addColumn(byte[] family, byte[] qualifier) {
NavigableSet<byte[]> set = familyMap.get(family);
@@ -294,7 +294,7 @@ public class Scan extends Query {
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @see #readAllVersions()
- * @see #readVersions(int) n
+ * @see #readVersions(int)
*/
public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = TimeRange.between(minStamp, maxStamp);
@@ -307,7 +307,7 @@ public class Scan extends Query {
* number of versions beyond the default.
* @param timestamp version timestamp
* @see #readAllVersions()
- * @see #readVersions(int) n
+ * @see #readVersions(int)
*/
public Scan setTimestamp(long timestamp) {
try {
@@ -336,9 +336,9 @@ public class Scan extends Query {
* {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
* unexpected or even undefined.
* </p>
- * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if
- * startRow does not meet criteria for a row key (when length exceeds
- * {@link HConstants#MAX_ROW_LENGTH})
+ * @param startRow row to start scanner at or after
+ * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+ * exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStartRow(byte[] startRow) {
return withStartRow(startRow, true);
@@ -355,9 +355,9 @@ public class Scan extends Query {
* unexpected or even undefined.
* </p>
* @param startRow row to start scanner at or after
- * @param inclusive whether we should include the start row when scan n * @throws
- * IllegalArgumentException if startRow does not meet criteria for a row key
- * (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
+ * @param inclusive whether we should include the start row when scanning
+ * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+ * exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStartRow(byte[] startRow, boolean inclusive) {
if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
@@ -378,9 +378,9 @@ public class Scan extends Query {
* {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
* unexpected or even undefined.
* </p>
- * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does
- * not meet criteria for a row key (when length exceeds
- * {@link HConstants#MAX_ROW_LENGTH})
+ * @param stopRow row to end at (exclusive)
+ * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+ * exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStopRow(byte[] stopRow) {
return withStopRow(stopRow, false);
@@ -397,9 +397,9 @@ public class Scan extends Query {
* unexpected or even undefined.
* </p>
* @param stopRow row to end at
- * @param inclusive whether we should include the stop row when scan n * @throws
- * IllegalArgumentException if stopRow does not meet criteria for a row key (when
- * length exceeds {@link HConstants#MAX_ROW_LENGTH})
+ * @param inclusive whether we should include the stop row when scanning
+ * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+ * exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStopRow(byte[] stopRow, boolean inclusive) {
if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
@@ -427,11 +427,10 @@ public class Scan extends Query {
* <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
* a combination will yield unexpected and even undefined results.
* </p>
- * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) n
- * * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method
- * is considered to be confusing as it does not use a {@link Filter} but uses
- * setting the startRow and stopRow instead. Use
- * {@link #setStartStopRowForPrefixScan(byte[])} instead.
+ * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
+ * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method is considered to be
+ * confusing as it does not use a {@link Filter} but uses setting the startRow and
+ * stopRow instead. Use {@link #setStartStopRowForPrefixScan(byte[])} instead.
*/
@Deprecated
public Scan setRowPrefixFilter(byte[] rowPrefix) {
@@ -454,7 +453,7 @@ public class Scan extends Query {
* <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
* a combination will yield unexpected and even undefined results.
* </p>
- * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) n
+ * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
*/
public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
if (rowPrefix == null) {
@@ -468,7 +467,7 @@ public class Scan extends Query {
}
/**
- * Get all available versions. n
+ * Get all available versions.
*/
public Scan readAllVersions() {
this.maxVersions = Integer.MAX_VALUE;
@@ -477,7 +476,7 @@ public class Scan extends Query {
/**
* Get up to the specified number of versions of each column.
- * @param versions specified number of versions for each column n
+ * @param versions specified number of versions for each column
*/
public Scan readVersions(int versions) {
this.maxVersions = versions;
@@ -555,7 +554,7 @@ public class Scan extends Query {
/**
* Setting the familyMap
- * @param familyMap map of family to qualifier n
+ * @param familyMap map of family to qualifier
*/
public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
this.familyMap = familyMap;
@@ -563,7 +562,7 @@ public class Scan extends Query {
}
/**
- * Getting the familyMap n
+ * Getting the familyMap
*/
public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
@@ -638,16 +637,12 @@ public class Scan extends Query {
return this.caching;
}
- /**
- * n
- */
+ /** Returns TimeRange */
public TimeRange getTimeRange() {
return this.tr;
}
- /**
- * n
- */
+ /** Returns RowFilter */
@Override
public Filter getFilter() {
return filter;
@@ -682,7 +677,7 @@ public class Scan extends Query {
* Set whether this scan is a reversed one
* <p>
* This is false by default which means forward(normal) scan.
- * @param reversed if true, scan will be backward order n
+ * @param reversed if true, scan will be in backward order
*/
public Scan setReversed(boolean reversed) {
this.reversed = reversed;
@@ -701,7 +696,8 @@ public class Scan extends Query {
* Setting whether the caller wants to see the partial results when server returns
* less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By
* default this value is false and the complete results will be assembled client side before being
- * delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow()
+ * delivered to the caller.
+ * @see Result#mayHaveMoreCellsInRow()
* @see #setBatch(int)
*/
public Scan setAllowPartialResults(final boolean allowPartialResults) {
@@ -725,7 +721,7 @@ public class Scan extends Query {
/**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
- * and aggregation by debugging, logging, and administration tools. n
+ * and aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
@@ -747,7 +743,7 @@ public class Scan extends Query {
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
- * @param maxCols a limit on the number of columns output prior to truncation n
+ * @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
@@ -904,7 +900,7 @@ public class Scan extends Query {
* reaches this value.
* <p>
* This condition will be tested at last, after all other conditions such as stopRow, filter, etc.
- * @param limit the limit of rows for this scan n
+ * @param limit the limit of rows for this scan
*/
public Scan setLimit(int limit) {
this.limit = limit;
@@ -913,7 +909,7 @@ public class Scan extends Query {
/**
* Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
- * set {@code readType} to {@link ReadType#PREAD}. n
+ * set {@code readType} to {@link ReadType#PREAD}.
*/
public Scan setOneRowLimit() {
return setLimit(1).setReadType(ReadType.PREAD);
@@ -935,7 +931,7 @@ public class Scan extends Query {
* Set the read type for this scan.
* <p>
* Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
- * example, we will always use pread if this is a get scan. n
+ * example, we will always use pread if this is a get scan.
*/
public Scan setReadType(ReadType readType) {
this.readType = readType;
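Pulling the Scan setters above together, a typical configuration sketch (row keys and limits are arbitrary):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanSketch {
      public static void main(String[] args) {
        Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("row-a"), true)  // inclusive start
          .withStopRow(Bytes.toBytes("row-z"), false)  // exclusive stop
          .readVersions(3)                             // up to 3 versions per column
          .setLimit(1000);                             // tested after stopRow, filter, etc.
        // Point-lookup style: limit 1 plus pread in one call.
        Scan oneRow = new Scan().withStartRow(Bytes.toBytes("row-a")).setOneRowLimit();
      }
    }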
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 53c33a667c3..7feefc831ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -116,7 +116,8 @@ public interface Table extends Closeable {
* @param results Empty Object[], same size as actions. Provides access to partial results, in
* case an exception is thrown. A null in the result array means that the call for
* that action failed, even after retries. The order of the objects in the results
- * array corresponds to the order of actions in the request list. n * @since 0.90.0
+ * array corresponds to the order of actions in the request list.
+ * @since 0.90.0
*/
default void batch(final List<? extends Row> actions, final Object[] results)
throws IOException, InterruptedException {
@@ -264,8 +265,8 @@ public interface Table extends Closeable {
* @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
* {@link #put(List)} runs pre-flight validations on the input list on client. Currently
* {@link #delete(List)} doesn't run validations on the client, there is no need
- * currently, but this may change in the future. An * {@link IllegalArgumentException}
- * will be thrown in this case.
+ * currently, but this may change in the future. An {@link IllegalArgumentException} will
+ * be thrown in this case.
*/
default void delete(List<Delete> deletes) throws IOException {
throw new NotImplementedException("Add an implementation!");
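A sketch of the batch contract described above, assuming an open Table; the results array must match the actions list in size and order:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchSketch {
      static void run(Table table) throws Exception {
        List<Row> actions = new ArrayList<>();
        actions.add(new Put(Bytes.toBytes("r1")).addColumn(Bytes.toBytes("cf"),
          Bytes.toBytes("q"), Bytes.toBytes("v")));
        actions.add(new Get(Bytes.toBytes("r2")));
        Object[] results = new Object[actions.size()];
        table.batch(actions, results); // a null entry means that action failed after retries
      }
    }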
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
index f500a1128a5..1c91819ac4b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -153,7 +153,7 @@ public interface TableDescriptor {
String getRegionSplitPolicyClassName();
/**
- * Get the name of the table n
+ * Get the name of the table
*/
TableName getTableName();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index d0d3e36aa8f..43ca935ffa1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -886,7 +886,7 @@ public class TableDescriptorBuilder {
}
/**
- * Get the name of the table n
+ * Get the name of the table
*/
@Override
public TableName getTableName() {
@@ -1299,7 +1299,8 @@ public class TableDescriptorBuilder {
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
- * @param className Full class name. n * @return the modifyable TD
+ * @param className Full class name.
+ * @return the modifyable TD
*/
public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
@@ -1347,8 +1348,8 @@ public class TableDescriptorBuilder {
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
- * @param specStr The Coprocessor specification all in in one String n * @return the modifyable
- * TD
+ * @param specStr The Coprocessor specification all in one String
+ * @return the modifyable TD
* @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed
* in HBase 3.0.0.
*/
@@ -1461,8 +1462,8 @@ public class TableDescriptorBuilder {
/**
* @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
- * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code> n
- * * @see #toByteArray()
+ * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
+ * @see #toByteArray()
*/
private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
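Sketch of setCoprocessor as documented above; the coprocessor class name here is hypothetical, and loadability is only checked when a region opens:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DescriptorSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .setCoprocessor("org.example.MyRegionObserver") // hypothetical class name
          .build();
        System.out.println(td.getTableName());
      }
    }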
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
index 4e20302be45..bf54f6e5904 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -42,7 +42,7 @@ public class TableState {
/**
* Convert from PB version of State
- * @param state convert from n
+ * @param state the state to convert from
*/
public static State convert(HBaseProtos.TableState.State state) {
State ret;
@@ -66,7 +66,7 @@ public class TableState {
}
/**
- * Covert to PB version of State n
+ * Convert to PB version of State
*/
public HBaseProtos.TableState.State convert() {
HBaseProtos.TableState.State state;
@@ -140,7 +140,7 @@ public class TableState {
}
/**
- * Table name for state n
+ * Table name for state
*/
public TableName getTableName() {
return tableName;
@@ -168,7 +168,7 @@ public class TableState {
}
/**
- * Covert to PB version of TableState n
+ * Convert to PB version of TableState
*/
public HBaseProtos.TableState convert() {
return HBaseProtos.TableState.newBuilder().setState(this.state.convert()).build();
@@ -177,7 +177,7 @@ public class TableState {
/**
* Convert from PB version of TableState
* @param tableName the table this state is for
- * @param tableState convert from n
+ * @param tableState convert from
*/
public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) {
TableState.State state = State.convert(tableState.getState());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index ab5915ec975..76a0d6addf3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -33,7 +33,7 @@ public class ServerStatistics {
/**
* Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, as
- * something gets set nn
+ * something gets set
*/
public void update(byte[] region, RegionLoadStats currentStats) {
RegionStatistics regionStat = this.stats.get(region);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
index 7a266de3345..c705463b62c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
@@ -35,8 +35,8 @@ public class ServerSideScanMetrics {
private final Map<String, AtomicLong> counters = new HashMap<>();
/**
- * Create a new counter with the specified name n * @return {@link AtomicLong} instance for the
- * counter with counterName
+ * Create a new counter with the specified name
+ * @return {@link AtomicLong} instance for the counter with counterName
*/
protected AtomicLong createCounter(String counterName) {
AtomicLong c = new AtomicLong(0);
@@ -59,9 +59,6 @@ public class ServerSideScanMetrics {
*/
public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME);
- /**
- * nn
- */
public void setCounter(String counterName, long value) {
AtomicLong c = this.counters.get(counterName);
if (c != null) {
@@ -69,23 +66,16 @@ public class ServerSideScanMetrics {
}
}
- /**
- * n * @return true if a counter exists with the counterName
- */
+ /** Returns true if a counter exists with the counterName */
public boolean hasCounter(String counterName) {
return this.counters.containsKey(counterName);
}
- /**
- * n * @return {@link AtomicLong} instance for this counter name, null if counter does not exist.
- */
+ /** Returns {@link AtomicLong} instance for this counter name, null if counter does not exist. */
public AtomicLong getCounter(String counterName) {
return this.counters.get(counterName);
}
- /**
- * nn
- */
public void addToCounter(String counterName, long delta) {
AtomicLong c = this.counters.get(counterName);
if (c != null) {
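A sketch of the counter API above; it assumes ServerSideScanMetrics is directly instantiable and that the ROWS_SCANNED name constant is public, which this sketch takes on faith:

    import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics;

    public class MetricsSketch {
      public static void main(String[] args) {
        ServerSideScanMetrics metrics = new ServerSideScanMetrics();
        String name = ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
        if (metrics.hasCounter(name)) {   // counter was created by the class itself
          metrics.addToCounter(name, 5);  // silently ignored for unknown counter names
          System.out.println(metrics.getCounter(name).get()); // 5
        }
      }
    }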
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
index 73e3b53eb36..c8eab212446 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
@@ -52,36 +52,27 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Message;
public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Message,
R extends Message> {
- /**
- * nnn * @return value of type T n
- */
+ /** Returns value of type T */
public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException;
- /**
- * nn * @return sum or non null value among (if either of them is null); otherwise returns a null.
- */
+ /** Returns the sum, or the non-null value if either of them is null; otherwise returns null. */
public abstract S add(S l1, S l2);
/**
- * returns the maximum value for this type T n
+ * Returns the maximum value for this type T
*/
-
public abstract T getMaxValue();
public abstract T getMinValue();
- /**
- * nnn
- */
+ /** Returns the product of o1 and o2 */
public abstract S multiply(S o1, S o2);
- /**
- * nn
- */
+ /** Returns the incremented value of o */
public abstract S increment(S o);
/**
- * provides casting opportunity between the data types. nn
+ * Provides a casting opportunity between the data types.
*/
public abstract S castToReturnType(T o);
@@ -96,7 +87,7 @@ public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Messa
/**
* used for computing average of <S> data values. Not providing the divide method that takes
- * two <S> values as it is not needed as of now. nnn
+ * two <S> values as it is not needed as of now.
*/
public abstract double divideForAvg(S o, Long l);
@@ -112,37 +103,37 @@ public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Messa
/**
* This method should initialize any field(s) of the ColumnInterpreter with a parsing of the
- * passed message bytes (used on the server side). n
+ * passed message bytes (used on the server side).
*/
public abstract void initialize(P msg);
/**
- * This method gets the PB message corresponding to the cell type n * @return the PB message for
- * the cell-type instance
+ * This method gets the PB message corresponding to the cell type
+ * @return the PB message for the cell-type instance
*/
public abstract Q getProtoForCellType(T t);
/**
- * This method gets the PB message corresponding to the cell type n * @return the cell-type
- * instance from the PB message
+ * This method gets the PB message corresponding to the cell type
+ * @return the cell-type instance from the PB message
*/
public abstract T getCellValueFromProto(Q q);
/**
- * This method gets the PB message corresponding to the promoted type n * @return the PB message
- * for the promoted-type instance
+ * This method gets the PB message corresponding to the promoted type
+ * @return the PB message for the promoted-type instance
*/
public abstract R getProtoForPromotedType(S s);
/**
- * This method gets the promoted type from the proto message n * @return the promoted-type
- * instance from the PB message
+ * This method gets the promoted type from the proto message
+ * @return the promoted-type instance from the PB message
*/
public abstract S getPromotedValueFromProto(R r);
/**
* The response message comes as type S. This will convert/cast it to T. In some sense, performs
- * the opposite of {@link #castToReturnType(Object)} nn
+ * the opposite of {@link #castToReturnType(Object)}
*/
public abstract T castToCellType(S response);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
index ff9ed066fd4..de8e90ca9ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
@@ -33,7 +33,7 @@ public class CoprocessorException extends DoNotRetryIOException {
}
/**
- * Constructor with a Class object and exception message. nn
+ * Constructor with a Class object and exception message.
*/
public CoprocessorException(Class<?> clazz, String s) {
super("Coprocessor [" + clazz.getName() + "]: " + s);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
index fd9936dc502..5f2b98c8370 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
@@ -150,7 +150,7 @@ public final class ClientExceptionsUtil {
/**
* Translates exception for preemptive fast fail checks.
* @param t exception to check
- * @return translated exception n
+ * @return translated exception
*/
public static Throwable translatePFFE(Throwable t) throws IOException {
if (t instanceof NoSuchMethodError) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
index ae15777a7f0..00774e37094 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
@@ -35,15 +35,13 @@ public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRet
}
/**
- * n
- */
+ */
public FailedSanityCheckException(String message) {
super(message);
}
/**
- * nn
- */
+ */
public FailedSanityCheckException(String message, Throwable cause) {
super(message, cause);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
index e7c06d44aef..1991100d0da 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
@@ -68,9 +68,7 @@ public class ColumnValueFilter extends FilterBase {
this.comparator = Preconditions.checkNotNull(comparator, "Comparator should not be null");
}
- /**
- * n
- */
+ /** Returns operator */
public CompareOperator getCompareOperator() {
return op;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
index a5f5efcaba1..8140793fc77 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
@@ -216,8 +216,9 @@ public abstract class Filter {
/**
* Concrete implementers can signal a failure condition in their code by throwing an
- * {@link IOException}. n * @return true if and only if the fields of the filter that are
- * serialized are equal to the corresponding fields in other. Used for testing.
+ * {@link IOException}.
+ * @return true if and only if the fields of the filter that are serialized are equal to the
+ * corresponding fields in other. Used for testing.
*/
abstract boolean areSerializedFieldsEqual(Filter other);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
index ff637c7f052..713c4acb270 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
@@ -134,9 +134,9 @@ public abstract class FilterBase extends Filter {
}
/**
- * Default implementation so that writers of custom filters aren't forced to implement. n
- * * @return true if and only if the fields of the filter that are serialized are equal to the
- * corresponding fields in other. Used for testing.
+ * Default implementation so that writers of custom filters aren't forced to implement.
+ * @return true if and only if the fields of the filter that are serialized are equal to the
+ * corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
index 3b7c136c6e1..cb42072e1d8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
@@ -84,7 +84,7 @@ final public class FilterList extends FilterBase {
/**
* Constructor that takes a var arg number of {@link Filter}s. The default operator MUST_PASS_ALL
- * is assumed. n
+ * is assumed.
*/
public FilterList(final Filter... filters) {
this(Operator.MUST_PASS_ALL, Arrays.asList(filters));
@@ -108,14 +108,14 @@ final public class FilterList extends FilterBase {
}
/**
- * Get the operator. n
+ * Get the operator.
*/
public Operator getOperator() {
return operator;
}
/**
- * Get the filters. n
+ * Get the filters.
*/
public List<Filter> getFilters() {
return filterListBase.getFilters();
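Sketch of composing filters with the var-arg constructor above, which defaults to MUST_PASS_ALL (logical AND); the column names and the 0.5 chance are arbitrary:

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.RandomRowFilter;
    import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FilterListSketch {
      public static void main(String[] args) {
        FilterList filters = new FilterList(
          new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q"),
            CompareOperator.EQUAL, Bytes.toBytes("v")),
          new RandomRowFilter(0.5f)); // include roughly half of the matching rows
        Scan scan = new Scan().setFilter(filters);
        System.out.println(filters.getOperator()); // MUST_PASS_ALL
      }
    }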
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
index 4a15af27726..9b0fd99dc94 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
@@ -92,7 +92,7 @@ public abstract class FilterListBase extends FilterBase {
* the current child, we should set the traverse result (transformed cell) of previous node(s) as
* the initial value. (HBASE-18879).
* @param c The cell in question.
- * @return the transformed cell. n
+ * @return the transformed cell.
*/
@Override
public Cell transformCell(Cell c) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 1506eca5df6..2feac5527f7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -116,7 +116,8 @@ public class FuzzyRowFilter extends FilterBase {
/**
* We need to preprocess the mask array, since we treat 2's as unfixed positions and -1 (0xff) as
- * fixed positions n * @return mask array
+ * fixed positions
+ * @return mask array
*/
private byte[] preprocessMask(byte[] mask) {
if (!UNSAFE_UNALIGNED) {
@@ -588,8 +589,8 @@ public class FuzzyRowFilter extends FilterBase {
/**
* For forward scanner, next cell hint should not contain any trailing zeroes unless they are part
- * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01' nn * @param
- * toInc - position of incremented byte
+ * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01'
+ * @param toInc - position of incremented byte
* @return trimmed version of result
*/
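To make the mask preprocessing above concrete, a sketch of constructing the filter; in the client-facing mask, 0 marks a fixed byte and 1 a fuzzy one (rewritten internally to -1/2 as described):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
    import org.apache.hadoop.hbase.util.Pair;

    public class FuzzyRowSketch {
      public static void main(String[] args) {
        byte[] key  = {0x01, 0x02, 0x00, 0x00}; // fuzzy positions can hold any value
        byte[] mask = {0, 0, 1, 1};             // fix the first two bytes only
        FuzzyRowFilter filter = new FuzzyRowFilter(Arrays.asList(Pair.newPair(key, mask)));
      }
    }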
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
index 099f38026fe..1fdf051941a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
@@ -37,7 +37,7 @@ public class RandomRowFilter extends FilterBase {
protected boolean filterOutRow;
/**
- * Create a new filter with a specified chance for a row to be included. n
+ * Create a new filter with a specified chance for a row to be included.
*/
public RandomRowFilter(float chance) {
this.chance = chance;
@@ -49,7 +49,7 @@ public class RandomRowFilter extends FilterBase {
}
/**
- * Set the chance that a row is included. n
+ * Set the chance that a row is included.
*/
public void setChance(float chance) {
this.chance = chance;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
index 14bdc04a754..3293a2106a9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
@@ -71,7 +71,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
}
/**
- * Constructor for protobuf deserialization only. nnnnnn
+ * Constructor for protobuf deserialization only.
*/
protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index 7be5ce91405..43b3316db77 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -111,7 +111,7 @@ public class SingleColumnValueFilter extends FilterBase {
}
/**
- * Constructor for protobuf deserialization only. nnnnnn
+ * Constructor for protobuf deserialization only.
*/
protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
index b3f821d75e4..235691ef7cb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
@@ -54,7 +54,7 @@ public class TimestampsFilter extends FilterBase {
long minTimestamp = Long.MAX_VALUE;
/**
- * Constructor for filter that retains only the specified timestamps in the list. n
+ * Constructor for filter that retains only the specified timestamps in the list.
*/
public TimestampsFilter(List<Long> timestamps) {
this(timestamps, false);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
index b2b3698aa2c..e7364ca3b42 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
@@ -104,9 +104,10 @@ class CellBlockBuilder {
/**
* Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
- * <code>compressor</code>. nnn * @return Null or byte buffer filled with a cellblock filled with
- * passed-in Cells encoded using passed in <code>codec</code> and/or <code>compressor</code>; the
- * returned buffer has been flipped and is ready for reading. Use limit to find total size. n
+ * <code>compressor</code>.
+ * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
+ * passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has
+ * been flipped and is ready for reading. Use limit to find total size.
*/
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
index d63f28cdab8..155c721b98a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
@@ -34,8 +34,7 @@ public class LeaseException extends DoNotRetryIOException {
}
/**
- * n
- */
+ */
public LeaseException(String message) {
super(message);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
index 2e2a3a895ce..c0330034810 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
@@ -33,8 +33,7 @@ public class FailedLogCloseException extends IOException {
}
/**
- * n
- */
+ */
public FailedLogCloseException(String msg) {
super(msg);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
index feab0b07f2f..a2a43203b64 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
@@ -32,8 +32,7 @@ public class FailedSyncBeforeLogCloseException extends FailedLogCloseException {
}
/**
- * n
- */
+ */
public FailedSyncBeforeLogCloseException(String msg) {
super(msg);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
index 92ca03945aa..87b2287a601 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
@@ -50,7 +50,7 @@ public abstract class AbstractHBaseSaslRpcClient {
* @param token token to use if needed by the authentication method
* @param serverAddr the address of the hbase service
* @param securityInfo the security details for the remote hbase service
- * @param fallbackAllowed does the client allow fallback to simple authentication n
+ * @param fallbackAllowed does the client allow fallback to simple authentication
*/
protected AbstractHBaseSaslRpcClient(Configuration conf,
SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
@@ -66,7 +66,7 @@ public abstract class AbstractHBaseSaslRpcClient {
* @param serverAddr the address of the hbase service
* @param securityInfo the security details for the remote hbase service
* @param fallbackAllowed does the client allow fallback to simple authentication
- * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") n
+ * @param rpcProtection the protection level ("authentication", "integrity" or "privacy")
*/
protected AbstractHBaseSaslRpcClient(Configuration conf,
SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 5a816877ba8..6c755f9a94c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -62,7 +62,7 @@ public final class EncryptionUtil {
* @param conf configuration
* @param key the raw key bytes
* @param algorithm the algorithm to use with this key material
- * @return the encrypted key bytes n
+ * @return the encrypted key bytes
*/
public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm)
throws IOException {
@@ -115,7 +115,7 @@ public final class EncryptionUtil {
* @param conf configuration
* @param subject subject key alias
* @param value the encrypted key bytes
- * @return the raw key bytes nn
+ * @return the raw key bytes
*/
public static Key unwrapKey(Configuration conf, String subject, byte[] value)
throws IOException, KeyException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
index 93ad9245f65..0394bb0f2a3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
@@ -86,7 +86,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
* Do client side SASL authentication with server via the given InputStream and OutputStream
* @param inS InputStream to use
* @param outS OutputStream to use
- * @return true if connection is set up, or false if needs to switch to simple Auth. n
+ * @return true if connection is set up, or false if we need to switch to simple Auth.
*/
public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException {
DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
@@ -185,7 +185,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
/**
* Get a SASL wrapped InputStream. Can be called only after saslConnect() has been called.
- * @return a SASL wrapped InputStream n
+ * @return a SASL wrapped InputStream
*/
public InputStream getInputStream() throws IOException {
if (!saslClient.isComplete()) {
@@ -248,7 +248,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
/**
* Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called.
- * @return a SASL wrapped OutputStream n
+ * @return a SASL wrapped OutputStream
*/
public OutputStream getOutputStream() throws IOException {
if (!saslClient.isComplete()) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
index e30041d46c4..2ea60f8ed57 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
@@ -45,7 +45,7 @@ public class AccessControlClient {
/**
* Return true if authorization is supported and enabled
* @param connection The connection to use
- * @return true if authorization is supported and enabled, false otherwise n
+ * @return true if authorization is supported and enabled, false otherwise
*/
public static boolean isAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -55,7 +55,7 @@ public class AccessControlClient {
/**
* Return true if cell authorization is supported and enabled
* @param connection The connection to use
- * @return true if cell authorization is supported and enabled, false otherwise n
+ * @return true if cell authorization is supported and enabled, false otherwise
*/
public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -146,7 +146,7 @@ public class AccessControlClient {
/**
* Grant global permissions for the specified user. If permissions for the specified user exists,
- * later granted permissions will override previous granted permissions. nnnn
+ * later granted permissions will override previously granted permissions.
*/
public static void grant(Connection connection, final String userName,
final Permission.Action... actions) throws Throwable {
@@ -162,7 +162,7 @@ public class AccessControlClient {
/**
* Revokes the permission on the table
- * @param connection The Connection instance to use nnnnnn
+ * @param connection The Connection instance to use
*/
public static void revoke(Connection connection, final TableName tableName, final String username,
final byte[] family, final byte[] qualifier, final Permission.Action... actions)
@@ -173,7 +173,7 @@ public class AccessControlClient {
/**
* Revokes the permission on the namespace for the specified user.
- * @param connection The Connection instance to use nnnn
+ * @param connection The Connection instance to use
*/
public static void revoke(Connection connection, final String namespace, final String userName,
final Permission.Action... actions) throws Throwable {
@@ -197,7 +197,7 @@ public class AccessControlClient {
* along with the list of superusers would be returned. Else, no rows get returned.
* @param connection The Connection instance to use
* @param tableRegex The regular expression string to match against
- * @return List of UserPermissions n
+ * @return List of UserPermissions
*/
public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex)
throws Throwable {
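
The static helpers above compose naturally; a hedged sketch of a typical
check-grant-revoke-list flow (the user and table names are invented for illustration):

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    // Sketch only: user and table names are made up.
    static void demoAcl(Connection connection) throws Throwable {
      if (!AccessControlClient.isAuthorizationEnabled(connection)) {
        return; // the AccessController coprocessor is not active on this cluster
      }
      // Global grant; a later grant for the same user overrides this one.
      AccessControlClient.grant(connection, "alice", Permission.Action.READ);
      // Table-scoped revoke; family and qualifier may be null for whole-table scope.
      AccessControlClient.revoke(connection, TableName.valueOf("t1"), "alice", null, null,
        Permission.Action.READ);
      List<UserPermission> perms = AccessControlClient.getUserPermissions(connection, "t.*");
    }
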
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
index e0eb79aa025..970c3f2b04d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
@@ -450,8 +450,8 @@ public class AccessControlUtil {
* It's also called by the shell, in case you want to find references.
* @param protocol the AccessControlService protocol proxy
* @param userShortName the short name of the user to grant permissions
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -478,8 +478,8 @@ public class AccessControlUtil {
* @param tableName optional table name
* @param f optional column family
* @param q optional qualifier
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -504,8 +504,8 @@ public class AccessControlUtil {
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param namespace the namespace to grant permissions on
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -621,9 +621,8 @@ public class AccessControlUtil {
* A utility used to get user's global permissions based on the specified user name.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
- * instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -651,8 +650,8 @@ public class AccessControlUtil {
* It's also called by the shell, in case you want to find references.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param t optional table name n * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param t optional table name
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -668,9 +667,8 @@ public class AccessControlUtil {
* @param t optional table name
* @param columnFamily Column family
* @param columnQualifier Column qualifier
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -708,8 +706,8 @@ public class AccessControlUtil {
* It's also called by the shell, in case you want to find references.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param namespace name of the namespace n * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param namespace name of the namespace
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -722,9 +720,8 @@ public class AccessControlUtil {
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param namespace name of the namespace
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
- * instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -762,8 +759,8 @@ public class AccessControlUtil {
* will not be considered if columnFamily is passed as null or empty.
* @param userName User name, it shouldn't be null or empty.
* @param actions Actions
- * @return true if access allowed, otherwise false n * @deprecated Use
- * {@link Admin#hasUserPermissions(String, List)} instead.
+ * @return true if access allowed, otherwise false
+ * @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead.
*/
@Deprecated
public static boolean hasPermission(RpcController controller,
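
Every entry point in this file now carries the same deprecation pointer; a sketch of the
suggested Admin-side replacement, with the Permission builder and UserPermission constructor
shapes assumed from the deprecation notes rather than shown in this diff:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    // Sketch only: grant READ/WRITE on a table, merging with existing grants.
    static void grantViaAdmin(Admin admin) throws java.io.IOException {
      Permission perm = Permission.newBuilder(TableName.valueOf("t1"))
        .withActions(Permission.Action.READ, Permission.Action.WRITE).build();
      // 'true' asks the server to merge with permissions the user already holds.
      admin.grant(new UserPermission("alice", perm), true);
    }
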
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
index 7bae98d59ba..931f976f2f4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
@@ -55,7 +55,7 @@ public class VisibilityClient {
/**
* Return true if cell visibility features are supported and enabled
* @param connection The connection to use
- * @return true if cell visibility features are supported and enabled, false otherwise n
+ * @return true if cell visibility features are supported and enabled, false otherwise
*/
public static boolean isCellVisibilityEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -63,7 +63,7 @@ public class VisibilityClient {
}
/**
- * Utility method for adding label to the system. nnnn
+ * Utility method for adding a label to the system.
*/
public static VisibilityLabelsResponse addLabel(Connection connection, final String label)
throws Throwable {
@@ -71,7 +71,7 @@ public class VisibilityClient {
}
/**
- * Utility method for adding labels to the system. nnnn
+ * Utility method for adding labels to the system.
*/
public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels)
throws Throwable {
@@ -109,7 +109,7 @@ public class VisibilityClient {
}
/**
- * Sets given labels globally authorized for the user. nnnnn
+ * Sets the given labels as globally authorized for the user.
*/
public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths,
final String user) throws Throwable {
@@ -154,7 +154,7 @@ public class VisibilityClient {
* Retrieve the list of visibility labels defined in the system.
* @param connection The Connection instance to use.
* @param regex The regular expression to filter which labels are returned.
- * @return labels The list of visibility labels defined in the system. n
+ * @return The list of visibility labels defined in the system.
*/
public static ListLabelsResponse listLabels(Connection connection, final String regex)
throws Throwable {
@@ -190,7 +190,7 @@ public class VisibilityClient {
}
/**
- * Removes given labels from user's globally authorized list of labels. nnnnn
+ * Removes the given labels from the user's globally authorized list of labels.
*/
public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths,
final String user) throws Throwable {
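
A hedged sketch tying the label helpers above together (label and user names are invented,
and the null-regex behaviour of listLabels is an assumption):

    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

    // Sketch only: label and user names are illustrative.
    static void demoLabels(Connection connection) throws Throwable {
      if (!VisibilityClient.isCellVisibilityEnabled(connection)) {
        return; // the VisibilityController is not active on this cluster
      }
      VisibilityClient.addLabels(connection, new String[] { "secret", "public" });
      // Authorize a user for one of the labels just defined.
      VisibilityClient.setAuths(connection, new String[] { "secret" }, "alice");
      // A null regex is assumed here to mean "list every label".
      VisibilityClient.listLabels(connection, null);
    }
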
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 35c361be562..079ddbb4218 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -562,7 +562,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Get to a client Get
* @param proto the protocol buffer Get to convert
- * @return the converted client Get n
+ * @return the converted client Get
*/
public static Get toGet(final ClientProtos.Get proto) throws IOException {
if (proto == null) return null;
@@ -647,7 +647,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Mutate to a Put.
* @param proto The protocol buffer MutationProto to convert
- * @return A client Put. n
+ * @return A client Put.
*/
public static Put toPut(final MutationProto proto) throws IOException {
return toPut(proto, null);
@@ -657,7 +657,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Mutate to a Put.
* @param proto The protocol buffer MutationProto to convert
* @param cellScanner If non-null, the Cell data that goes with this proto.
- * @return A client Put. n
+ * @return A client Put.
*/
public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@@ -741,7 +741,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Mutate to a Delete
* @param proto the protocol buffer Mutate to convert
- * @return the converted client Delete n
+ * @return the converted client Delete
*/
public static Delete toDelete(final MutationProto proto) throws IOException {
return toDelete(proto, null);
@@ -751,7 +751,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Mutate to a Delete
* @param proto the protocol buffer Mutate to convert
* @param cellScanner if non-null, the data that goes with this delete.
- * @return the converted client Delete n
+ * @return the converted client Delete
*/
public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@@ -920,7 +920,7 @@ public final class ProtobufUtil {
/**
* Convert a MutateRequest to Mutation
* @param proto the protocol buffer Mutate to convert
- * @return the converted Mutation n
+ * @return the converted Mutation
*/
public static Mutation toMutation(final MutationProto proto) throws IOException {
MutationType type = proto.getMutateType();
@@ -968,7 +968,7 @@ public final class ProtobufUtil {
/**
* Convert a client Scan to a protocol buffer Scan
* @param scan the client Scan to convert
- * @return the converted protocol buffer Scan n
+ * @return the converted protocol buffer Scan
*/
public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
@@ -1062,7 +1062,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Scan to a client Scan
* @param proto the protocol buffer Scan to convert
- * @return the converted client Scan n
+ * @return the converted client Scan
*/
public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
byte[] startRow = HConstants.EMPTY_START_ROW;
@@ -1182,7 +1182,7 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer Get based on a client Get.
* @param get the client Get
- * @return a protocol buffer Get n
+ * @return a protocol buffer Get
*/
public static ClientProtos.Get toGet(final Get get) throws IOException {
ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
@@ -1248,7 +1248,8 @@ public final class ProtobufUtil {
}
/**
- * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
+ * Create a protocol buffer Mutate based on a client Mutation
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutation(final MutationType type, final Mutation mutation,
final long nonce) throws IOException {
@@ -1297,8 +1298,8 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
- * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a
- * protobuf'd Mutation n
+ * Understanding is that the Cell will be transported other than via protobuf.
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation,
final MutationProto.Builder builder) throws IOException {
@@ -1307,8 +1308,8 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
- * Understanding is that the Cell will be transported other than via protobuf. nn * @return a
- * protobuf'd Mutation n
+ * Understanding is that the Cell will be transported other than via protobuf.
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation)
throws IOException {
@@ -1334,8 +1335,8 @@ public final class ProtobufUtil {
/**
* Code shared by {@link #toMutation(MutationType, Mutation)} and
- * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd
- * Mutation.
+ * {@link #toMutationNoData(MutationType, Mutation)}
+ * @return A partly-filled out protobuf'd Mutation.
*/
private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
final Mutation mutation, MutationProto.Builder builder) {
@@ -1468,7 +1469,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Result to a client Result
* @param proto the protocol buffer Result to convert
* @param scanner Optional cell scanner.
- * @return the converted client Result n
+ * @return the converted client Result
*/
public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner)
throws IOException {
@@ -1583,8 +1584,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer
- * DeleteType n
+ * Convert a delete KeyValue type to protocol buffer DeleteType.
+ * @return protocol buffer DeleteType
*/
public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
switch (type) {
@@ -1604,7 +1605,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer DeleteType to delete KeyValue type.
* @param type The DeleteType
- * @return The type. n
+ * @return The type.
*/
public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException {
switch (type) {
@@ -1690,7 +1691,7 @@ public final class ProtobufUtil {
}
/**
- * A helper to close a region given a region name using admin protocol. nnn
+ * A helper to close a region given a region name using admin protocol.
*/
public static void closeRegion(final RpcController controller,
final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName)
@@ -1705,7 +1706,7 @@ public final class ProtobufUtil {
}
/**
- * A helper to warmup a region given a region name using admin protocol nn *
+ * A helper to warm up a region given a region name using admin protocol.
*/
public static void warmupRegion(final RpcController controller,
final AdminService.BlockingInterface admin,
@@ -1722,7 +1723,7 @@ public final class ProtobufUtil {
}
/**
- * A helper to open a region using admin protocol. nnn
+ * A helper to open a region using admin protocol.
*/
public static void openRegion(final RpcController controller,
final AdminService.BlockingInterface admin, ServerName server,
@@ -1736,8 +1737,8 @@ public final class ProtobufUtil {
}
/**
- * A helper to get the all the online regions on a region server using admin protocol. n * @return
- * a list of online region info n
+ * A helper to get all the online regions on a region server using admin protocol.
+ * @return a list of online region info
*/
public static List<org.apache.hadoop.hbase.client.RegionInfo>
getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException {
@@ -2069,7 +2070,8 @@ public final class ProtobufUtil {
/**
* Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to
* NOT print out data both because it can be big but also so we do not have data in our logs. Use
- * judiciously. n * @return toString of passed <code>m</code>
+ * judiciously.
+ * @return toString of passed <code>m</code>
*/
public static String getShortTextFormat(Message m) {
if (m == null) return "null";
@@ -2216,8 +2218,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
- * client CellVisibility
+ * Convert a protocol buffer CellVisibility to a client CellVisibility
+ * @return the converted client CellVisibility
*/
public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
if (proto == null) return null;
@@ -2225,8 +2227,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
- * converted client CellVisibility n
+ * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+ * @return the converted client CellVisibility
*/
public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
if (protoBytes == null) return null;
@@ -2242,8 +2244,8 @@ public final class ProtobufUtil {
}
/**
- * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
- * protocol buffer CellVisibility
+ * Create a protocol buffer CellVisibility based on a client CellVisibility.
+ * @return a protocol buffer CellVisibility
*/
public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@@ -2252,8 +2254,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer Authorizations to a client Authorizations n * @return the converted
- * client Authorizations
+ * Convert a protocol buffer Authorizations to a client Authorizations
+ * @return the converted client Authorizations
*/
public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) {
if (proto == null) return null;
@@ -2261,8 +2263,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer Authorizations bytes to a client Authorizations n * @return the
- * converted client Authorizations n
+ * Convert a protocol buffer Authorizations bytes to a client Authorizations
+ * @return the converted client Authorizations
*/
public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException {
if (protoBytes == null) return null;
@@ -2278,8 +2280,8 @@ public final class ProtobufUtil {
}
/**
- * Create a protocol buffer Authorizations based on a client Authorizations. n * @return a
- * protocol buffer Authorizations
+ * Create a protocol buffer Authorizations based on a client Authorizations.
+ * @return a protocol buffer Authorizations
*/
public static ClientProtos.Authorizations toAuthorizations(Authorizations authorizations) {
ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder();
@@ -2290,8 +2292,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer TimeUnit to a client TimeUnit n * @return the converted client
- * TimeUnit
+ * Convert a protocol buffer TimeUnit to a client TimeUnit
+ * @return the converted client TimeUnit
*/
public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) {
switch (proto) {
@@ -2314,8 +2316,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a client TimeUnit to a protocol buffer TimeUnit n * @return the converted protocol
- * buffer TimeUnit
+ * Convert a client TimeUnit to a protocol buffer TimeUnit
+ * @return the converted protocol buffer TimeUnit
*/
public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) {
switch (timeUnit) {
@@ -2338,8 +2340,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer ThrottleType to a client ThrottleType n * @return the converted
- * client ThrottleType
+ * Convert a protocol buffer ThrottleType to a client ThrottleType
+ * @return the converted client ThrottleType
*/
public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) {
switch (proto) {
@@ -2367,8 +2369,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a client ThrottleType to a protocol buffer ThrottleType n * @return the converted
- * protocol buffer ThrottleType
+ * Convert a client ThrottleType to a protocol buffer ThrottleType
+ * @return the converted protocol buffer ThrottleType
*/
public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) {
switch (type) {
@@ -2396,8 +2398,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer QuotaScope to a client QuotaScope n * @return the converted client
- * QuotaScope
+ * Convert a protocol buffer QuotaScope to a client QuotaScope
+ * @return the converted client QuotaScope
*/
public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) {
switch (proto) {
@@ -2410,8 +2412,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a client QuotaScope to a protocol buffer QuotaScope n * @return the converted protocol
- * buffer QuotaScope
+ * Convert a client QuotaScope to a protocol buffer QuotaScope
+ * @return the converted protocol buffer QuotaScope
*/
public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) {
switch (scope) {
@@ -2424,8 +2426,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer QuotaType to a client QuotaType n * @return the converted client
- * QuotaType
+ * Convert a protocol buffer QuotaType to a client QuotaType
+ * @return the converted client QuotaType
*/
public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) {
switch (proto) {
@@ -2438,8 +2440,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a client QuotaType to a protocol buffer QuotaType n * @return the converted protocol
- * buffer QuotaType
+ * Convert a client QuotaType to a protocol buffer QuotaType
+ * @return the converted protocol buffer QuotaType
*/
public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) {
switch (type) {
@@ -2566,7 +2568,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeDelimitedFrom avoids the hard-coded 64MB limit for decoding
* buffers
* @param builder current message builder
- * @param in Inputsream with delimited protobuf data n
+ * @param in InputStream with delimited protobuf data
*/
public static void mergeDelimitedFrom(Message.Builder builder, InputStream in)
throws IOException {
@@ -2588,7 +2590,7 @@ public final class ProtobufUtil {
* where the message size is known
* @param builder current message builder
* @param in InputStream containing protobuf data
- * @param size known size of protobuf data n
+ * @param size known size of protobuf data
*/
public static void mergeFrom(Message.Builder builder, InputStream in, int size)
throws IOException {
@@ -2602,7 +2604,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers
* where the message size is not known
* @param builder current message builder
- * @param in InputStream containing protobuf data n
+ * @param in InputStream containing protobuf data
*/
public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException {
final CodedInputStream codedInput = CodedInputStream.newInstance(in);
@@ -2615,7 +2617,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with ByteStrings
* @param builder current message builder
- * @param bs ByteString containing the n
+ * @param bs ByteString containing the protobuf data
*/
public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException {
final CodedInputStream codedInput = bs.newCodedInput();
@@ -2628,7 +2630,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with byte arrays
* @param builder current message builder
- * @param b byte array n
+ * @param b byte array
*/
public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -2641,7 +2643,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with byte arrays
* @param builder current message builder
- * @param b byte array nnn
+ * @param b byte array
*/
public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length)
throws IOException {
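
All of the mergeFrom overloads exist to sidestep protobuf's hard-coded 64MB decode limit; a
minimal sketch, assuming the byte array holds a serialized ClientProtos.Scan:

    import java.io.IOException;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

    // Sketch only: 'serialized' is assumed to hold a ClientProtos.Scan message.
    static ClientProtos.Scan parseLargeScan(byte[] serialized) throws IOException {
      ClientProtos.Scan.Builder builder = ClientProtos.Scan.newBuilder();
      ProtobufUtil.mergeFrom(builder, serialized); // no 64MB decode cap on this path
      return builder.build();
    }
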
@@ -2821,7 +2823,7 @@ public final class ProtobufUtil {
/**
* Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state
- * @param state the protobuf CompactionState n
+ * @param state the protobuf CompactionState
*/
public static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) {
return CompactionState.valueOf(state.toString());
@@ -2833,7 +2835,7 @@ public final class ProtobufUtil {
/**
* Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state
- * @param state the protobuf CompactionState n
+ * @param state the protobuf CompactionState
*/
public static CompactionState
createCompactionStateForRegionLoad(RegionLoad.CompactionState state) {
@@ -2938,9 +2940,7 @@ public final class ProtobufUtil {
stats.getCompactionPressure());
}
- /**
- * n * @return A String version of the passed in <code>msg</code>
- */
+ /** Returns a String version of the passed in <code>msg</code> */
public static String toText(Message msg) {
return TextFormat.shortDebugString(msg);
}
@@ -2950,7 +2950,7 @@ public final class ProtobufUtil {
}
/**
- * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. n
+ * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it.
*/
public static <T> T call(Callable<T> callable) throws IOException {
try {
@@ -3061,7 +3061,7 @@ public final class ProtobufUtil {
* magic and that is then followed by a protobuf that has a serialized
* {@link ServerName} in it.
* @return Returns null if <code>data</code> is null else converts passed data to a ServerName
- * instance. n
+ * instance.
*/
public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException {
if (data == null || data.length <= 0) return null;
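
Most converters in ProtobufUtil come in client-to-protobuf and protobuf-to-client pairs, so a
round trip is the natural smoke test; a sketch with an invented column family:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: the family name is made up.
    static Scan roundTrip() throws IOException {
      Scan scan = new Scan().addFamily(Bytes.toBytes("cf"));
      ClientProtos.Scan proto = ProtobufUtil.toScan(scan); // client -> wire form
      return ProtobufUtil.toScan(proto);                   // wire form -> client
    }
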
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index f678a43986d..9c88b61fd67 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -195,7 +195,7 @@ public final class RequestConverter {
/**
* Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append
- * @return a mutate request n
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row,
final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value,
@@ -215,7 +215,7 @@ public final class RequestConverter {
/**
* Create a protocol buffer MultiRequest for conditioned row mutations
- * @return a multi request n
+ * @return a multi request
*/
public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName,
final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op,
@@ -272,7 +272,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MutateRequest for a put nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for a put
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put)
throws IOException {
@@ -284,7 +285,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MutateRequest for an append nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for an append
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append,
long nonceGroup, long nonce) throws IOException {
@@ -300,7 +302,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MutateRequest for a client increment nn * @return a mutate request
+ * Create a protocol buffer MutateRequest for a client increment
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment,
final long nonceGroup, final long nonce) throws IOException {
@@ -316,7 +319,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MutateRequest for a delete nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for a delete
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete)
throws IOException {
@@ -336,7 +340,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer ScanRequest for a client Scan nnnn * @return a scan request n
+ * Create a protocol buffer ScanRequest for a client Scan
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int numberOfRows,
boolean closeScanner) throws IOException {
@@ -356,7 +361,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer ScanRequest for a scanner id nnn * @return a scan request
+ * Create a protocol buffer ScanRequest for a scanner id
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
boolean trackMetrics) {
@@ -371,7 +377,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer ScanRequest for a scanner id nnnn * @return a scan request
+ * Create a protocol buffer ScanRequest for a scanner id
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) {
@@ -391,7 +398,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer bulk load request nnnnnn * @return a bulk load request
+ * Create a protocol buffer bulk load request
+ * @return a bulk load request
*/
public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -457,7 +465,7 @@ public final class RequestConverter {
* @param mutationBuilder mutationBuilder to be used to build mutation.
* @param nonceGroup nonceGroup to be applied.
* @param indexMap Map of created RegionAction to the original index for a
- * RowMutations/CheckAndMutate within the original list of actions n
+ * RowMutations/CheckAndMutate within the original list of actions
*/
public static void buildNoDataRegionActions(final byte[] regionName,
final Iterable<Action> actions, final List<CellScannable> cells,
@@ -825,7 +833,8 @@ public final class RequestConverter {
/**
* Create a CompactRegionRequest for a given region name
* @param regionName the name of the region to get info
- * @param major indicator if it is a major compaction n * @return a CompactRegionRequest
+ * @param major indicator if it is a major compaction
+ * @return a CompactRegionRequest
*/
public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
byte[] columnFamily) {
@@ -883,7 +892,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer AddColumnRequest nn * @return an AddColumnRequest
+ * Create a protocol buffer AddColumnRequest
+ * @return an AddColumnRequest
*/
public static AddColumnRequest buildAddColumnRequest(final TableName tableName,
final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -896,7 +906,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer DeleteColumnRequest nn * @return a DeleteColumnRequest
+ * Create a protocol buffer DeleteColumnRequest
+ * @return a DeleteColumnRequest
*/
public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName,
final byte[] columnName, final long nonceGroup, final long nonce) {
@@ -909,7 +920,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer ModifyColumnRequest nn * @return an ModifyColumnRequest
+ * Create a protocol buffer ModifyColumnRequest
+ * @return an ModifyColumnRequest
*/
public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName,
final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -935,7 +947,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MoveRegionRequest nn * @return A MoveRegionRequest
+ * Create a protocol buffer MoveRegionRequest
+ * @return A MoveRegionRequest
*/
public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
ServerName destServerName) {
@@ -976,7 +989,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer AssignRegionRequest n * @return an AssignRegionRequest
+ * Create a protocol buffer AssignRegionRequest
+ * @return an AssignRegionRequest
*/
public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) {
AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder();
@@ -985,7 +999,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer UnassignRegionRequest n * @return an UnassignRegionRequest
+ * Creates a protocol buffer UnassignRegionRequest
+ * @return an UnassignRegionRequest
*/
public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) {
UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
@@ -994,7 +1009,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer OfflineRegionRequest n * @return an OfflineRegionRequest
+ * Creates a protocol buffer OfflineRegionRequest
+ * @return an OfflineRegionRequest
*/
public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) {
OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder();
@@ -1003,7 +1019,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest
+ * Creates a protocol buffer DeleteTableRequest
+ * @return a DeleteTableRequest
*/
public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1031,7 +1048,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer EnableTableRequest n * @return an EnableTableRequest
+ * Creates a protocol buffer EnableTableRequest
+ * @return an EnableTableRequest
*/
public static EnableTableRequest buildEnableTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1043,7 +1061,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer DisableTableRequest n * @return a DisableTableRequest
+ * Creates a protocol buffer DisableTableRequest
+ * @return a DisableTableRequest
*/
public static DisableTableRequest buildDisableTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1055,7 +1074,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer CreateTableRequest nn * @return a CreateTableRequest
+ * Creates a protocol buffer CreateTableRequest
+ * @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor,
final byte[][] splitKeys, final long nonceGroup, final long nonce) {
@@ -1072,7 +1092,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer ModifyTableRequest nn * @return a ModifyTableRequest
+ * Creates a protocol buffer ModifyTableRequest
+ * @return a ModifyTableRequest
*/
public static ModifyTableRequest buildModifyTableRequest(final TableName tableName,
final TableDescriptor tableDesc, final long nonceGroup, final long nonce) {
@@ -1096,7 +1117,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer GetTableDescriptorsRequest n * @return a GetTableDescriptorsRequest
+ * Creates a protocol buffer GetTableDescriptorsRequest
+ * @return a GetTableDescriptorsRequest
*/
public static GetTableDescriptorsRequest
buildGetTableDescriptorsRequest(final List<TableName> tableNames) {
@@ -1193,7 +1215,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer SetBalancerRunningRequest nn * @return a SetBalancerRunningRequest
+ * Creates a protocol buffer SetBalancerRunningRequest
+ * @return a SetBalancerRunningRequest
*/
public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on,
boolean synchronous) {
@@ -1278,8 +1301,8 @@ public final class RequestConverter {
}
/**
- * Creates a request for querying the master the last flushed sequence Id for a region n * @return
- * A {@link GetLastFlushedSequenceIdRequest}
+ * Creates a request for querying the master the last flushed sequence Id for a region
+ * @return A {@link GetLastFlushedSequenceIdRequest}
*/
public static GetLastFlushedSequenceIdRequest
buildGetLastFlushedSequenceIdRequest(byte[] regionName) {
@@ -1330,7 +1353,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer SetNormalizerRunningRequest n * @return a SetNormalizerRunningRequest
+ * Creates a protocol buffer SetNormalizerRunningRequest
+ * @return a SetNormalizerRunningRequest
*/
public static SetNormalizerRunningRequest buildSetNormalizerRunningRequest(boolean on) {
return SetNormalizerRunningRequest.newBuilder().setOn(on).build();
@@ -1438,7 +1462,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer CreateNamespaceRequest n * @return a CreateNamespaceRequest
+ * Creates a protocol buffer CreateNamespaceRequest
+ * @return a CreateNamespaceRequest
*/
public static CreateNamespaceRequest
buildCreateNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1448,7 +1473,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer ModifyNamespaceRequest n * @return a ModifyNamespaceRequest
+ * Creates a protocol buffer ModifyNamespaceRequest
+ * @return a ModifyNamespaceRequest
*/
public static ModifyNamespaceRequest
buildModifyNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1458,7 +1484,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer DeleteNamespaceRequest n * @return a DeleteNamespaceRequest
+ * Creates a protocol buffer DeleteNamespaceRequest
+ * @return a DeleteNamespaceRequest
*/
public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String name) {
DeleteNamespaceRequest.Builder builder = DeleteNamespaceRequest.newBuilder();
@@ -1467,8 +1494,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer GetNamespaceDescriptorRequest n * @return a
- * GetNamespaceDescriptorRequest
+ * Creates a protocol buffer GetNamespaceDescriptorRequest
+ * @return a GetNamespaceDescriptorRequest
*/
public static GetNamespaceDescriptorRequest
buildGetNamespaceDescriptorRequest(final String name) {
@@ -1592,7 +1619,7 @@ public final class RequestConverter {
/**
* Creates IsSnapshotCleanupEnabledRequest to determine if auto snapshot cleanup based on TTL
- * expiration is turned on n
+ * expiration is turned on.
*/
public static IsSnapshotCleanupEnabledRequest buildIsSnapshotCleanupEnabledRequest() {
return IsSnapshotCleanupEnabledRequest.newBuilder().build();
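
A sketch of the simplest of the builders above, buildMutateRequest for a Put (row, family,
qualifier and value are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: row, family, qualifier and value are illustrative.
    static MutateRequest putRequest(byte[] regionName) throws IOException {
      Put put = new Put(Bytes.toBytes("row1")).addColumn(Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
      // Wraps the client Put in a protobuf MutateRequest addressed to one region.
      return RequestConverter.buildMutateRequest(regionName, put);
    }
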
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index 440891382e7..09cbc460f22 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -90,7 +90,7 @@ public final class ResponseConverter {
* @param request the original protocol buffer MultiRequest
* @param response the protocol buffer MultiResponse to convert
* @param cells Cells to go with the passed in <code>proto</code>. Can be null.
- * @return the results that were in the MultiResponse (a Result or an Exception). n
+ * @return the results that were in the MultiResponse (a Result or an Exception).
*/
public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
final MultiResponse response, final CellScanner cells) throws IOException {
@@ -103,7 +103,7 @@ public final class ResponseConverter {
* @param indexMap Used to support RowMutations/CheckAndMutate in batch
* @param response the protocol buffer MultiResponse to convert
* @param cells Cells to go with the passed in <code>proto</code>. Can be null.
- * @return the results that were in the MultiResponse (a Result or an Exception). n
+ * @return the results that were in the MultiResponse (a Result or an Exception).
*/
public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
final Map<Integer, Integer> indexMap, final MultiResponse response, final CellScanner cells)
@@ -247,7 +247,8 @@ public final class ResponseConverter {
}
/**
- * Wrap a throwable to an action result. n * @return an action result builder
+ * Wrap a throwable to an action result.
+ * @return an action result builder
*/
public static ResultOrException.Builder buildActionResult(final Throwable t) {
ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -256,7 +257,8 @@ public final class ResponseConverter {
}
/**
- * Wrap a throwable to an action result. n * @return an action result builder
+ * Wrap a throwable to an action result.
+ * @return an action result builder
*/
public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) {
ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -264,9 +266,7 @@ public final class ResponseConverter {
return builder;
}
- /**
- * n * @return NameValuePair of the exception name to stringified version os exception.
- */
+ /** Returns NameValuePair of the exception name to stringified version of the exception. */
public static NameBytesPair buildException(final Throwable t) {
NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
parameterBuilder.setName(t.getClass().getName());
@@ -307,7 +307,8 @@ public final class ResponseConverter {
}
/**
- * A utility to build a GetServerInfoResponse. nn * @return the response
+ * A utility to build a GetServerInfoResponse.
+ * @return the response
*/
public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName,
final int webuiPort) {
@@ -322,7 +323,8 @@ public final class ResponseConverter {
}
/**
- * A utility to build a GetOnlineRegionResponse. n * @return the response
+ * A utility to build a GetOnlineRegionResponse.
+ * @return the response
*/
public static GetOnlineRegionResponse
buildGetOnlineRegionResponse(final List<RegionInfo> regions) {
@@ -405,7 +407,7 @@ public final class ResponseConverter {
}
/**
- * Create Results from the cells using the cells meta data. nnn
+ * Create Results from the cells using the cells' metadata.
*/
public static Result[] getResults(CellScanner cellScanner, ScanResponse response)
throws IOException {
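
A sketch of packing a per-action failure with buildActionResult; the index field on
ResultOrException is assumed from the MultiResponse wire format rather than shown in this hunk:

    import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException;

    // Sketch only: the index field is an assumption about the wire format.
    static ResultOrException failureAt(int actionIndex, Throwable t) {
      ResultOrException.Builder builder = ResponseConverter.buildActionResult(t);
      builder.setIndex(actionIndex); // position of the failed action in the batch
      return builder.build();
    }
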
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
index 091515c325e..2787b5ab7f9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
@@ -91,7 +91,7 @@ public class Writables {
* @return The passed Writable after its readFields has been called fed by the passed
* <code>bytes</code> array or IllegalArgumentException if passed null or an empty
* <code>bytes</code> array.
- * @throws IOException e n
+ * @throws IOException e
*/
public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException {
return getWritable(bytes, 0, bytes.length, w);
@@ -107,7 +107,7 @@ public class Writables {
* @return The passed Writable after its readFields has been called fed by the passed
* <code>bytes</code> array or IllegalArgumentException if passed null or an empty
* <code>bytes</code> array.
- * @throws IOException e n
+ * @throws IOException e
*/
public static Writable getWritable(final byte[] bytes, final int offset, final int length,
final Writable w) throws IOException {
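
A small sketch of the getWritable contract above, assuming the byte array was produced by a
matching serialization (for example Writables.getBytes):

    import java.io.IOException;
    import org.apache.hadoop.hbase.util.Writables;
    import org.apache.hadoop.io.Writable;

    // Sketch only: 'bytes' is assumed to be the output of an earlier serialization.
    static <W extends Writable> W rehydrate(byte[] bytes, W instance) throws IOException {
      // Feeds instance.readFields(...) from the byte array and returns the instance.
      Writables.getWritable(bytes, instance);
      return instance;
    }
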
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
index 97c8302b221..8d3fcd2c342 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
@@ -144,7 +144,7 @@ public class ZNodePaths {
/**
* Parses the meta replicaId from the passed path.
- * @param path the name of the full path which includes baseZNode. n
+ * @param path the name of the full path which includes baseZNode.
*/
public int getMetaReplicaIdFromPath(String path) {
// Extract the znode from path. The prefix is of the following format.
@@ -155,7 +155,7 @@ public class ZNodePaths {
/**
* Parse the meta replicaId from the passed znode
- * @param znode the name of the znode, does not include baseZNode n
+ * @param znode the name of the znode, does not include baseZNode
*/
public int getMetaReplicaIdFromZNode(String znode) {
return znode.equals(metaZNodePrefix)
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
index cc329cd3d03..cce3ba4e4e3 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
@@ -42,7 +42,7 @@ public class TestDeleteTimeStamp {
private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier");
/*
- * Test for verifying that the timestamp in delete object is being honored. n
+ * Test for verifying that the timestamp in the delete object is being honored.
*/
@Test
public void testTimeStamp() {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index 27cf51e7c9f..d7eef52a4f9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -100,7 +100,8 @@ public final class AuthUtil {
/**
* For kerberized cluster, return login user (from kinit or from keytab if specified). For
* non-kerberized cluster, return system user.
- * @param conf configuartion file n * @throws IOException login exception
+ * @param conf configuration file
+ * @throws IOException login exception
*/
@InterfaceAudience.Private
public static User loginClient(Configuration conf) throws IOException {
@@ -160,7 +161,8 @@ public final class AuthUtil {
* <p>
* NOT recommended to use this method unless you're sure what you're doing; it is for canary only.
* Please use User#loginClient.
- * @param conf configuration file n * @throws IOException login exception
+ * @param conf configuration file
+ * @throws IOException login exception
*/
private static User loginClientAsService(Configuration conf) throws IOException {
UserProvider provider = UserProvider.instantiate(conf);
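
A minimal sketch of the login flow described above; note loginClient is marked
audience-private, so this only illustrates the contract:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.AuthUtil;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.User;

    // Sketch only: shows the contract, not a recommended dependency on a private API.
    static User login() throws IOException {
      Configuration conf = HBaseConfiguration.create();
      // Kerberized cluster: picks up the kinit/keytab login; otherwise the OS user.
      return AuthUtil.loginClient(conf);
    }
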
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
index e5050b864ca..a29a98a8c09 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
@@ -54,7 +54,7 @@ public class ByteBufferKeyOnlyKeyValue extends ByteBufferExtendedCell {
/**
* A setter that helps to avoid object creation every time and whenever there is a need to create
- * new OffheapKeyOnlyKeyValue. nnn
+ * a new OffheapKeyOnlyKeyValue.
*/
public void setKey(ByteBuffer key, int offset, int length) {
setKey(key, offset, length, ByteBufferUtils.toShort(key, offset));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
index 28128ee37c6..677ed2295ce 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
@@ -48,7 +48,7 @@ public interface CellBuilder {
Cell build();
/**
- * Remove all internal elements from builder. n
+ * Remove all internal elements from the builder.
*/
CellBuilder clear();
}
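
A sketch of the reuse pattern clear() enables, assuming the standard CellBuilderFactory entry
point:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilder;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: clear() lets one builder be reused instead of allocating per cell.
    static Cell[] buildTwoCells() {
      CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
      Cell first = builder.setRow(Bytes.toBytes("r1")).setFamily(Bytes.toBytes("cf"))
        .setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("v1")).build();
      builder.clear(); // drop all state before assembling the next cell
      Cell second = builder.setRow(Bytes.toBytes("r2")).setFamily(Bytes.toBytes("cf"))
        .setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("v2")).build();
      return new Cell[] { first, second };
    }
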
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index b4d3b5549db..2c19c0f1043 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -661,9 +661,8 @@ public class CellComparatorImpl implements CellComparator {
/**
* Compares the row part of the cell with a simple plain byte[] like the stopRow in Scan. This
* should be used with context where for hbase:meta cells the
- * {{@link MetaCellComparator#META_COMPARATOR} should be used n * the cell to be compared n * the
- * kv serialized byte[] to be compared with n * the offset in the byte[] n * the length in the
- * byte[]
+ * {@link MetaCellComparator#META_COMPARATOR} should be used. Takes the cell to be compared, the
+ * kv serialized byte[] to be compared with, the offset in the byte[], and the length in the
+ * byte[].
* @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger than byte[], -1
* otherwise
*/
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index c28d0d87525..80dcf8c505d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -77,7 +77,8 @@ public final class CellUtil {
/**
* Makes a column in family:qualifier form from separate byte arrays.
* <p>
- * Not recommended for usage as this is old-style API. nn * @return family:qualifier
+ * Not recommended for usage as this is an old-style API.
+ * @return family:qualifier
*/
public static byte[] makeColumn(byte[] family, byte[] qualifier) {
return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier);
@@ -292,9 +293,7 @@ public final class CellUtil {
return destinationOffset + vlen;
}
- /**
- * n * @return CellScanner interface over <code>cellIterables</code>
- */
+ /** Returns CellScanner interface over <code>cellIterables</code> */
public static CellScanner
createCellScanner(final List<? extends CellScannable> cellScannerables) {
return new CellScanner() {
@@ -320,17 +319,15 @@ public final class CellUtil {
};
}
- /**
- * n * @return CellScanner interface over <code>cellIterable</code>
- */
+ /** Returns CellScanner interface over <code>cellIterable</code> */
public static CellScanner createCellScanner(final Iterable<Cell> cellIterable) {
if (cellIterable == null) return null;
return createCellScanner(cellIterable.iterator());
}
/**
- * n * @return CellScanner interface over <code>cellIterable</code> or null if <code>cells</code>
- * is null
+ * Returns CellScanner interface over <code>cellIterable</code> or null if <code>cells</code> is
+ * null
*/
public static CellScanner createCellScanner(final Iterator<Cell> cells) {
if (cells == null) return null;
@@ -352,9 +349,7 @@ public final class CellUtil {
};
}
- /**
- * n * @return CellScanner interface over <code>cellArray</code>
- */
+ /** Returns CellScanner interface over <code>cellArray</code> */
public static CellScanner createCellScanner(final Cell[] cellArray) {
return new CellScanner() {
private final Cell[] cells = cellArray;
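
A sketch walking one of the CellScanner views above, using the usual advance-then-current
loop:

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellScanner;
    import org.apache.hadoop.hbase.CellUtil;

    // Sketch only: counts the cells behind a createCellScanner view.
    static int countCells(Cell[] cellArray) throws IOException {
      CellScanner scanner = CellUtil.createCellScanner(cellArray);
      int count = 0;
      while (scanner.advance()) { // returns false once the backing data is exhausted
        scanner.current();        // the cell the scanner is positioned on
        count++;
      }
      return count;
    }
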
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
index ddbf71cac13..432556d2642 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
@@ -145,7 +145,7 @@ public class CompoundConfiguration extends Configuration {
/**
* Add Bytes map to config list. This map is generally created by HTableDescriptor or
* HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous
- * ones if there are name collisions. n * Bytes map
+ * ones if there are name collisions. The passed <code>map</code> is a Bytes map.
* @return this, for builder pattern
*/
public CompoundConfiguration addBytesMap(final Map<Bytes, Bytes> map) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
index b3b7a1c5e57..28e648ec466 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
@@ -41,7 +41,7 @@ public interface ExtendedCell extends RawCell, HeapSize {
* <tags></code>
* @param out Stream to which cell has to be written
* @param withTags Whether to write tags.
- * @return how many bytes are written. n
+ * @return how many bytes are written.
*/
// TODO remove the boolean param once HBASE-16706 is done.
default int write(OutputStream out, boolean withTags) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
index 5d428d0b434..5fc030581da 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
@@ -182,7 +182,7 @@ public class HBaseConfiguration extends Configuration {
* @param conf configuration instance for accessing the passwords
* @param alias the name of the password element
* @param defPass the default password
- * @return String password or default password n
+ * @return String password or default password
*/
public static String getPassword(Configuration conf, String alias, String defPass)
throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 59c54b0c031..3661c063e88 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1840,8 +1840,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
* Compare columnFamily, qualifier, timestamp, and key type (everything except the row). This
* method is used both in the normal comparator and the "same-prefix" comparator. Note that we
* are assuming that row portions of both KVs have already been parsed and found identical, and
- * we don't validate that assumption here. n * the length of the common prefix of the two
- * key-values being compared, including row length and row
+ * we don't validate that assumption here. The <code>commonPrefix</code> argument is the length
+ * of the common prefix of the two key-values being compared, including the row length and row.
*/
private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength,
byte[] right, int roffset, int rlength, short rowlength) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
index 4291d904fe8..ed3687e9ed4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
@@ -55,8 +55,8 @@ public class KeyValueTestUtil {
/**
* Checks whether KeyValues from kvCollection2 are contained in kvCollection1. The comparison is
- * made without distinguishing MVCC version of the KeyValues nn * @return true if KeyValues from
- * kvCollection2 are contained in kvCollection1
+ * made without distinguishing MVCC version of the KeyValues
+ * @return true if KeyValues from kvCollection2 are contained in kvCollection1
*/
public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1,
Collection<? extends Cell> kvCollection2) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index bdf77d511af..71f1da9a8a6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -58,8 +58,8 @@ public class KeyValueUtil {
/**
* Returns number of bytes this cell's key part would have been used if serialized as in
- * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type. n * @return the
- * key length
+ * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type.
+ * @return the key length
*/
public static int keyLength(final Cell cell) {
return keyLength(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength());
@@ -96,8 +96,8 @@ public class KeyValueUtil {
}
/**
- * The position will be set to the beginning of the new ByteBuffer n * @return the Bytebuffer
- * containing the key part of the cell
+ * The position will be set to the beginning of the new ByteBuffer
+ * @return the Bytebuffer containing the key part of the cell
*/
public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
byte[] bytes = new byte[keyLength(cell)];
@@ -107,8 +107,8 @@ public class KeyValueUtil {
}
/**
- * Copies the key to a new KeyValue n * @return the KeyValue that consists only the key part of
- * the incoming cell
+ * Copies the key to a new KeyValue
+ * @return the KeyValue that consists only the key part of the incoming cell
*/
public static KeyValue toNewKeyCell(final Cell cell) {
byte[] bytes = new byte[keyLength(cell)];
@@ -203,7 +203,7 @@ public class KeyValueUtil {
/**
* Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
- * position to the start of the next KeyValue. Does not allocate a new array or copy data. nnn
+ * position to the start of the next KeyValue. Does not allocate a new array or copy data.
*/
public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
boolean includesTags) {
@@ -236,7 +236,8 @@ public class KeyValueUtil {
/**
* Decrement the timestamp. For tests (currently wasteful) Remember timestamps are sorted reverse
- * chronologically. n * @return previous key
+ * chronologically.
+ * @return previous key
*/
public static KeyValue previousKey(final KeyValue in) {
return createFirstOnRow(CellUtil.cloneRow(in), CellUtil.cloneFamily(in),
@@ -246,9 +247,8 @@ public class KeyValueUtil {
/**
* Create a KeyValue for the specified row, family and qualifier that would be larger than or
* equal to all other possible KeyValues that have the same row, family, qualifier. Used for
- * reseeking. Should NEVER be returned to a client. n * row key n * row offset n * row length n *
- * family name n * family offset n * family length n * column qualifier n * qualifier offset n *
- * qualifier length
+ * reseeking. Should NEVER be returned to a client. The arguments give the row key, family name
+ * and column qualifier, each with its offset and length.
* @return Last possible key on passed row, family, qualifier.
*/
public static KeyValue createLastOnRow(final byte[] row, final int roffset, final int rlength,
@@ -408,11 +408,11 @@ public class KeyValueUtil {
/*************** misc **********************************/
/**
- * n * @return <code>cell</code> if it is an object of class {@link KeyValue} else we will return
- * a new {@link KeyValue} instance made from <code>cell</code> Note: Even if the cell is an object
- * of any of the subclass of {@link KeyValue}, we will create a new {@link KeyValue} object
- * wrapping same buffer. This API is used only with MR based tools which expect the type to be
- * exactly KeyValue. That is the reason for doing this way.
+ * @return <code>cell</code> if it is an object of class {@link KeyValue} else we will return a
+ * new {@link KeyValue} instance made from <code>cell</code> Note: Even if the cell is an
+ * object of any of the subclass of {@link KeyValue}, we will create a new
+ * {@link KeyValue} object wrapping same buffer. This API is used only with MR based tools
+ * which expect the type to be exactly KeyValue. That is the reason for doing this way.
* @deprecated without any replacement.
*/
@Deprecated
@@ -444,8 +444,9 @@ public class KeyValueUtil {
}
/**
- * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. nn
- * * @return Length written on stream n * @see #create(DataInput) for the inverse function
+ * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
+ * @return Length written on stream
+ * @see #create(DataInput) for the inverse function
*/
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
// This is how the old Writables write used to serialize KVs. Need to figure
@@ -639,7 +640,7 @@ public class KeyValueUtil {
* @param in inputStream to read.
* @param withTags whether the keyvalue should include tags are not
* @return Created KeyValue OR if we find a length of zero, we will return null which can be
- * useful marking a stream as done. n
+ * useful for marking a stream as done.
*/
public static KeyValue createKeyValueFromInputStream(InputStream in, boolean withTags)
throws IOException {
@@ -663,24 +664,24 @@ public class KeyValueUtil {
}
/**
- * n * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Returns a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final byte[] b) {
return createKeyValueFromKey(b, 0, b.length);
}
/**
- * n * @return A KeyValue made of a byte buffer that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Returns a KeyValue made of a byte buffer that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final ByteBuffer bb) {
return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit());
}
/**
- * nnn * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Returns a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
@@ -691,19 +692,19 @@ public class KeyValueUtil {
}
/**
- * n * Where to read bytes from. Creates a byte array to hold the KeyValue backing bytes copied
- * from the steam.
+ * Reads a KeyValue from <code>in</code>. Creates a byte array to hold the KeyValue backing
+ * bytes copied from the stream.
* @return KeyValue created by deserializing from <code>in</code> OR if we find a length of zero,
- * we will return null which can be useful marking a stream as done. n
+ * we will return null which can be useful for marking a stream as done.
*/
public static KeyValue create(final DataInput in) throws IOException {
return create(in.readInt(), in);
}
/**
- * Create a KeyValue reading <code>length</code> from <code>in</code> nn * @return Created
- * KeyValue OR if we find a length of zero, we will return null which can be useful marking a
- * stream as done. n
+ * Create a KeyValue reading <code>length</code> from <code>in</code>
+ * @return Created KeyValue OR if we find a length of zero, we will return null which can be
+ * useful for marking a stream as done.
*/
public static KeyValue create(int length, final DataInput in) throws IOException {
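[Editor's note: the write(KeyValue, DataOutput) / create(DataInput) pair documented above
round-trips; a minimal sketch with illustrative cell contents.]
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueRoundTripSketch {
  public static void main(String[] args) throws IOException {
    KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"),
      Bytes.toBytes("v"));
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    KeyValueUtil.write(kv, new DataOutputStream(bos)); // old Writable-style: int length + bytes
    KeyValue back =
      KeyValueUtil.create(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(CellUtil.equals(kv, back)); // expect true
  }
}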
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
index 1b035966da2..58c4b2d1cf1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
@@ -1046,7 +1046,7 @@ public final class PrivateCellUtil {
* Writes the row from the given cell to the output stream excluding the common prefix
* @param out The dataoutputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param rlength the row length n
+ * @param rlength the row length
*/
public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength,
int commonPrefix) throws IOException {
@@ -1234,7 +1234,6 @@ public final class PrivateCellUtil {
/**
* Compares only the key portion of a cell. It does not include the sequence id/mvcc of the cell
- * nn
* @return an int greater than 0 if left > than right lesser than 0 if left < than right
* equal to 0 if left is equal to right
*/
@@ -2195,7 +2194,7 @@ public final class PrivateCellUtil {
/**
* Writes the Cell's key part as it would have serialized in a KeyValue. The format is <2 bytes
* rk len><rk><1 byte cf len><cf><qualifier><8 bytes
- * timestamp><1 byte type> nnn
+ * timestamp><1 byte type>
*/
public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
short rowLen = cell.getRowLength();
@@ -2227,7 +2226,7 @@ public final class PrivateCellUtil {
/**
* Deep clones the given cell if the cell supports deep cloning
* @param cell the cell to be cloned
- * @return the cloned cell n
+ * @return the cloned cell
*/
public static Cell deepClone(Cell cell) throws CloneNotSupportedException {
if (cell instanceof ExtendedCell) {
@@ -2241,7 +2240,7 @@ public final class PrivateCellUtil {
* @param cell the cell to be written
* @param out the outputstream
* @param withTags if tags are to be written or not
- * @return the total bytes written n
+ * @return the total bytes written
*/
public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2316,8 +2315,8 @@ public final class PrivateCellUtil {
/**
* Sets the given seqId to the cell. Marked as audience Private as of 1.2.0. Setting a Cell
- * sequenceid is an internal implementation detail not for general public use. nn * @throws
- * IOException when the passed cell is not of type {@link ExtendedCell}
+ * sequenceid is an internal implementation detail not for general public use.
+ * @throws IOException when the passed cell is not of type {@link ExtendedCell}
*/
public static void setSequenceId(Cell cell, long seqId) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2329,8 +2328,8 @@ public final class PrivateCellUtil {
}
/**
- * Sets the given timestamp to the cell. nn * @throws IOException when the passed cell is not of
- * type {@link ExtendedCell}
+ * Sets the given timestamp to the cell.
+ * @throws IOException when the passed cell is not of type {@link ExtendedCell}
*/
public static void setTimestamp(Cell cell, long ts) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2386,7 +2385,7 @@ public final class PrivateCellUtil {
* Writes the row from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param rlength the row length n
+ * @param rlength the row length
*/
public static void writeRow(OutputStream out, Cell cell, short rlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2401,7 +2400,7 @@ public final class PrivateCellUtil {
* Writes the family from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param flength the family length n
+ * @param flength the family length
*/
public static void writeFamily(OutputStream out, Cell cell, byte flength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2416,7 +2415,7 @@ public final class PrivateCellUtil {
* Writes the qualifier from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param qlength the qualifier length n
+ * @param qlength the qualifier length
*/
public static void writeQualifier(OutputStream out, Cell cell, int qlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2432,7 +2431,7 @@ public final class PrivateCellUtil {
* Writes the qualifier from the given cell to the output stream excluding the common prefix
* @param out The dataoutputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param qlength the qualifier length n
+ * @param qlength the qualifier length
*/
public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, int qlength,
int commonPrefix) throws IOException {
@@ -2451,7 +2450,7 @@ public final class PrivateCellUtil {
* Writes the value from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param vlength the value length n
+ * @param vlength the value length
*/
public static void writeValue(OutputStream out, Cell cell, int vlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2466,7 +2465,7 @@ public final class PrivateCellUtil {
* Writes the tag from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param tagsLength the tag length n
+ * @param tagsLength the tag length
*/
public static void writeTags(OutputStream out, Cell cell, int tagsLength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2499,7 +2498,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the rowkey bytes of the given cell into an int value n * @return rowkey as int
+ * Converts the rowkey bytes of the given cell into an int value
+ * @return rowkey as int
*/
public static int getRowAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2510,7 +2510,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the value bytes of the given cell into a long value n * @return value as long
+ * Converts the value bytes of the given cell into a long value
+ * @return value as long
*/
public static long getValueAsLong(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2521,7 +2522,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the value bytes of the given cell into a int value n * @return value as int
+ * Converts the value bytes of the given cell into an int value
+ * @return value as int
*/
public static int getValueAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2532,7 +2534,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the value bytes of the given cell into a double value n * @return value as double
+ * Converts the value bytes of the given cell into a double value
+ * @return value as double
*/
public static double getValueAsDouble(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2543,7 +2546,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the value bytes of the given cell into a BigDecimal n * @return value as BigDecimal
+ * Converts the value bytes of the given cell into a BigDecimal
+ * @return value as BigDecimal
*/
public static BigDecimal getValueAsBigDecimal(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2764,8 +2768,9 @@ public final class PrivateCellUtil {
/**
* Estimate based on keyvalue's serialization format in the RPC layer. Note that there is an extra
* SIZEOF_INT added to the size here that indicates the actual length of the cell for cases where
- * cell's are serialized in a contiguous format (For eg in RPCs). n * @return Estimate of the
- * <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the actual cell length.
+ * cells are serialized in a contiguous format (for example, in RPCs).
+ * @return Estimate of the <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the
+ * actual cell length.
*/
public static int estimatedSerializedSizeOf(final Cell cell) {
return cell.getSerializedSize() + Bytes.SIZEOF_INT;
@@ -2785,9 +2790,9 @@ public final class PrivateCellUtil {
/**
* This method exists just to encapsulate how we serialize keys. To be replaced by a factory that
* we query to figure what the Cell implementation is and then, what serialization engine to use
- * and further, how to serialize the key for inclusion in hfile index. TODO. n * @return The key
- * portion of the Cell serialized in the old-school KeyValue way or null if passed a null
- * <code>cell</code>
+ * and further, how to serialize the key for inclusion in hfile index. TODO.
+ * @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed
+ * a null <code>cell</code>
*/
public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) {
if (cell == null) return null;
@@ -2797,8 +2802,8 @@ public final class PrivateCellUtil {
}
/**
- * Create a Cell that is smaller than all other possible Cells for the given Cell's row. n
- * * @return First possible Cell on passed Cell's row.
+ * Create a Cell that is smaller than all other possible Cells for the given Cell's row.
+ * @return First possible Cell on passed Cell's row.
*/
public static Cell createFirstOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2862,8 +2867,8 @@ public final class PrivateCellUtil {
/**
* Create a Cell that is smaller than all other possible Cells for the given Cell's rk:cf and
- * passed qualifier. nnnn * @return Last possible Cell on passed Cell's rk:cf and passed
- * qualifier.
+ * passed qualifier.
+ * @return First possible Cell on passed Cell's rk:cf and passed qualifier.
*/
public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2883,7 +2888,7 @@ public final class PrivateCellUtil {
* Creates the first cell with the row/family/qualifier of this cell and the given timestamp. Uses
* the "maximum" type that guarantees that the new cell is the lowest possible for this
* combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored.
- * @param cell - cell n
+ * @param cell - cell
*/
public static Cell createFirstOnRowColTS(Cell cell, long ts) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2901,8 +2906,8 @@ public final class PrivateCellUtil {
}
/**
- * Create a Cell that is larger than all other possible Cells for the given Cell's row. n
- * * @return Last possible Cell on passed Cell's row.
+ * Create a Cell that is larger than all other possible Cells for the given Cell's row.
+ * @return Last possible Cell on passed Cell's row.
*/
public static Cell createLastOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2919,7 +2924,8 @@ public final class PrivateCellUtil {
/**
* Create a Cell that is larger than all other possible Cells for the given Cell's rk:cf:q. Used
* in creating "fake keys" for the multi-column Bloom filter optimization to skip the row/column
- * we already know is not in the file. n * @return Last possible Cell on passed Cell's rk:cf:q.
+ * we already know is not in the file.
+ * @return Last possible Cell on passed Cell's rk:cf:q.
*/
public static Cell createLastOnRowCol(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
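[Editor's note: the getRowAsInt/getValueAsLong helpers documented above, sketched; PrivateCellUtil
is internal API, and the numeric row/value below are illustrative.]
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class CellNumericSketch {
  public static void main(String[] args) {
    KeyValue kv = new KeyValue(Bytes.toBytes(42), Bytes.toBytes("f"), Bytes.toBytes("q"),
      Bytes.toBytes(123L));
    System.out.println(PrivateCellUtil.getRowAsInt(kv));    // 42: rowkey bytes as int
    System.out.println(PrivateCellUtil.getValueAsLong(kv)); // 123: value bytes as long
  }
}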
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
index be8e4e769ba..9a2a29356b1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
@@ -95,7 +95,7 @@ public abstract class BaseDecoder implements Codec.Decoder {
/**
* Extract a Cell.
* @return a parsed Cell or throws an Exception. EOFException or a generic IOException maybe
- * thrown if EOF is reached prematurely. Does not return null. n
+ * thrown if EOF is reached prematurely. Does not return null.
*/
@NonNull
protected abstract Cell parseCell() throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
index e7facdbfbf2..f4552c03826 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
@@ -62,7 +62,7 @@ public class CellCodec implements Codec {
}
/**
- * Write int length followed by array bytes. nnnn
+ * Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
// TODO add BB backed os check and do for write. Pass Cell
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
index 75e3d48d9fa..07bfb53d5df 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
@@ -65,7 +65,7 @@ public class CellCodecWithTags implements Codec {
}
/**
- * Write int length followed by array bytes. nnnn
+ * Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
this.out.write(Bytes.toBytes(length));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
index 86a2fefae7a..2b21546a72a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
@@ -82,7 +82,7 @@ public class ByteBufferOutputStream extends OutputStream implements ByteBufferWr
}
/**
- * This flips the underlying BB so be sure to use it _last_! n
+ * This flips the underlying BB so be sure to use it _last_!
*/
public ByteBuffer getByteBuffer() {
curBuf.flip();
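[Editor's note: the "use it _last_" caveat above, sketched; after getByteBuffer() flips the buffer,
further writes would clobber it.]
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
import org.apache.hadoop.hbase.util.Bytes;

public class BBOSSketch {
  public static void main(String[] args) throws IOException {
    ByteBufferOutputStream out = new ByteBufferOutputStream(64);
    out.write(Bytes.toBytes("hello"));
    ByteBuffer bb = out.getByteBuffer(); // flips: position 0, limit 5; call this last
    System.out.println(bb.remaining());  // 5
  }
}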
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 1613bd563d0..d1310137e8c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -38,14 +38,14 @@ public interface CellOutputStream {
* Implementation must copy the entire state of the Cell. If the written Cell is modified
* immediately after the write method returns, the modifications must have absolutely no effect on
* the copy of the Cell that was added in the write.
- * @param cell Cell to write out n
+ * @param cell Cell to write out
*/
void write(Cell cell) throws IOException;
/**
* Let the implementation decide what to do. Usually means writing accumulated data into a byte[]
* that can then be read from the implementation to be sent to disk, put in the block cache, or
- * sent over the network. n
+ * sent over the network.
*/
void flush() throws IOException;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
index 593802bf3b6..08942426f87 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
@@ -154,8 +154,9 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
}
/**
- * Compares the bytes in this object to the specified byte array n * @return Positive if left is
- * bigger than right, 0 if they are equal, and negative if left is smaller than right.
+ * Compares the bytes in this object to the specified byte array
+ * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+ * smaller than right.
*/
public int compareTo(final byte[] that) {
return WritableComparator.compareBytes(this.bytes, this.offset, this.length, that, 0,
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
index af9126d942d..74b0f2db108 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
@@ -57,7 +57,7 @@ public class TagCompressionContext {
* @param out Stream to which the compressed tags to be written
* @param in Source where tags are available
* @param offset Offset for the tags bytes
- * @param length Length of all tag bytes n
+ * @param length Length of all tag bytes
*/
public void compressTags(OutputStream out, byte[] in, int offset, int length) throws IOException {
int pos = offset;
@@ -76,7 +76,7 @@ public class TagCompressionContext {
* @param out Stream to which the compressed tags to be written
* @param in Source buffer where tags are available
* @param offset Offset for the tags byte buffer
- * @param length Length of all tag bytes n
+ * @param length Length of all tag bytes
*/
public void compressTags(OutputStream out, ByteBuffer in, int offset, int length)
throws IOException {
@@ -101,7 +101,7 @@ public class TagCompressionContext {
* @param src Stream where the compressed tags are available
* @param dest Destination array where to write the uncompressed tags
* @param offset Offset in destination where tags to be written
- * @param length Length of all tag bytes n
+ * @param length Length of all tag bytes
*/
public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
throws IOException {
@@ -133,7 +133,7 @@ public class TagCompressionContext {
* @param dest Destination array where to write the uncompressed tags
* @param offset Offset in destination where tags to be written
* @param length Length of all tag bytes
- * @return bytes count read from source to uncompress all tags. n
+ * @return bytes count read from source to uncompress all tags.
*/
public int uncompressTags(ByteBuff src, byte[] dest, int offset, int length) throws IOException {
int srcBeginPos = src.position();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
index 09647b4ce91..f0152968162 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
@@ -86,7 +86,7 @@ public abstract class Cipher {
* @param out the output stream to wrap
* @param context the encryption context
* @param iv initialization vector
- * @return the encrypting wrapper n
+ * @return the encrypting wrapper
*/
public abstract OutputStream createEncryptionStream(OutputStream out, Context context, byte[] iv)
throws IOException;
@@ -95,7 +95,7 @@ public abstract class Cipher {
* Create an encrypting output stream given an initialized encryptor
* @param out the output stream to wrap
* @param encryptor the encryptor
- * @return the encrypting wrapper n
+ * @return the encrypting wrapper
*/
public abstract OutputStream createEncryptionStream(OutputStream out, Encryptor encryptor)
throws IOException;
@@ -105,7 +105,7 @@ public abstract class Cipher {
* @param in the input stream to wrap
* @param context the encryption context
* @param iv initialization vector
- * @return the decrypting wrapper n
+ * @return the decrypting wrapper
*/
public abstract InputStream createDecryptionStream(InputStream in, Context context, byte[] iv)
throws IOException;
@@ -114,7 +114,7 @@ public abstract class Cipher {
* Create a decrypting output stream given an initialized decryptor
* @param in the input stream to wrap
* @param decryptor the decryptor
- * @return the decrypting wrapper n
+ * @return the decrypting wrapper
*/
public abstract InputStream createDecryptionStream(InputStream in, Decryptor decryptor)
throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
index 0d29fe990b9..93822784594 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface Decryptor {
/**
- * Set the secret key n
+ * Set the secret key
*/
public void setKey(Key key);
@@ -45,12 +45,12 @@ public interface Decryptor {
public int getBlockSize();
/**
- * Set the initialization vector n
+ * Set the initialization vector
*/
public void setIv(byte[] iv);
/**
- * Create a stream for decryption n
+ * Create a stream for decryption
*/
public InputStream createDecryptionStream(InputStream in);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 761fe04d6fc..13e335b82ee 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -318,7 +318,7 @@ public final class Encryption {
* <p>
* The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
* @param out ciphertext
- * @param src plaintext nnnn
+ * @param src plaintext
*/
public static void encrypt(OutputStream out, byte[] src, int offset, int length, Encryptor e)
throws IOException {
@@ -333,7 +333,7 @@ public final class Encryption {
/**
* Encrypt a block of plaintext
* @param out ciphertext
- * @param src plaintext nnnnn
+ * @param src plaintext
*/
public static void encrypt(OutputStream out, byte[] src, int offset, int length, Context context,
byte[] iv) throws IOException {
@@ -349,7 +349,7 @@ public final class Encryption {
* <p>
* The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
* @param out ciphertext
- * @param in plaintext nn
+ * @param in plaintext
*/
public static void encrypt(OutputStream out, InputStream in, Encryptor e) throws IOException {
OutputStream cout = e.createEncryptionStream(out);
@@ -363,7 +363,7 @@ public final class Encryption {
/**
* Encrypt a stream of plaintext given a context and IV
* @param out ciphertext
- * @param in plaintet nnn
+ * @param in plaintext
*/
public static void encrypt(OutputStream out, InputStream in, Context context, byte[] iv)
throws IOException {
@@ -378,7 +378,6 @@ public final class Encryption {
* Decrypt a block of ciphertext read in from a stream with the given cipher and context
* <p>
* The decryptor's state will be finalized. It should be reinitialized or returned to the pool.
- * nnnnnn
*/
public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize, Decryptor d)
throws IOException {
@@ -391,7 +390,7 @@ public final class Encryption {
}
/**
- * Decrypt a block of ciphertext from a stream given a context and IV nnnnnnn
+ * Decrypt a block of ciphertext from a stream given a context and IV
*/
public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize,
Context context, byte[] iv) throws IOException {
@@ -402,7 +401,7 @@ public final class Encryption {
}
/**
- * Decrypt a stream of ciphertext given a decryptor nnnnn
+ * Decrypt a stream of ciphertext given a decryptor
*/
public static void decrypt(OutputStream out, InputStream in, int outLen, Decryptor d)
throws IOException {
@@ -425,7 +424,7 @@ public final class Encryption {
}
/**
- * Decrypt a stream of ciphertext given a context and IV nnnnnn
+ * Decrypt a stream of ciphertext given a context and IV
*/
public static void decrypt(OutputStream out, InputStream in, int outLen, Context context,
byte[] iv) throws IOException {
@@ -436,7 +435,8 @@ public final class Encryption {
}
/**
- * Resolves a key for the given subject nn * @return a key for the given subject
+ * Resolves a key for the given subject
+ * @return a key for the given subject
* @throws IOException if the key is not found
*/
public static Key getSecretKeyForSubject(String subject, Configuration conf) throws IOException {
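[Editor's note: a hedged sketch of resolving a subject key per the javadoc above; it assumes a key
provider is configured via hbase.crypto.keyprovider, otherwise an IOException is thrown. The alias
"hbase" is illustrative.]
import java.io.IOException;
import java.security.Key;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.crypto.Encryption;

public class SubjectKeySketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Key key = Encryption.getSecretKeyForSubject("hbase", conf);
    System.out.println(key.getAlgorithm());
  }
}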
@@ -460,7 +460,7 @@ public final class Encryption {
* @param in plaintext
* @param conf configuration
* @param cipher the encryption algorithm
- * @param iv the initialization vector, can be null n
+ * @param iv the initialization vector, can be null
*/
public static void encryptWithSubjectKey(OutputStream out, InputStream in, String subject,
Configuration conf, Cipher cipher, byte[] iv) throws IOException {
@@ -482,7 +482,7 @@ public final class Encryption {
* @param subject the subject's key alias
* @param conf configuration
* @param cipher the encryption algorithm
- * @param iv the initialization vector, can be null n
+ * @param iv the initialization vector, can be null
*/
public static void decryptWithSubjectKey(OutputStream out, InputStream in, int outLen,
String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
index f030de3e174..34f0fa4c0f7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface Encryptor {
/**
- * Set the secret key n
+ * Set the secret key
*/
public void setKey(Key key);
@@ -50,12 +50,12 @@ public interface Encryptor {
public byte[] getIv();
/**
- * Set the initialization vector n
+ * Set the initialization vector
*/
public void setIv(byte[] iv);
/**
- * Create a stream for encryption n
+ * Create a stream for encryption
*/
public OutputStream createEncryptionStream(OutputStream out);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
index 6c6ec5dd759..0852bc7f13f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
@@ -31,13 +31,13 @@ public interface KeyProvider {
public static final String PASSWORDFILE = "passwordfile";
/**
- * Initialize the key provider n
+ * Initialize the key provider
*/
public void init(String params);
/**
- * Retrieve the key for a given key aliase n * @return the keys corresponding to the supplied
- * alias, or null if a key is not found
+ * Retrieve the key for a given key alias
+ * @return the keys corresponding to the supplied alias, or null if a key is not found
*/
public Key getKey(String alias);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index 7f13b2c6f66..52825b6c683 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -90,9 +90,8 @@ public interface DataBlockEncoder {
EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx);
/**
- * Creates a encoder specific encoding context n * store configuration n * encoding strategy used
- * n * header bytes to be written, put a dummy header here if the header is unknown n * HFile meta
- * data
+ * Creates an encoder specific encoding context from the store configuration, the encoding
+ * strategy used, the header bytes to be written (a dummy header if the header is unknown), and
+ * the HFile meta data.
* @return a newly created encoding context
*/
HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
@@ -100,7 +99,7 @@ public interface DataBlockEncoder {
/**
* Creates an encoder specific decoding context, which will prepare the data before actual
- * decoding n * store configuration n * HFile meta data
+ * decoding, given the store configuration and HFile meta data
* @return a newly created decoding context
*/
HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, HFileContext meta);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
index 21f6c92ef35..4eba8fd854e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
@@ -99,7 +99,7 @@ public enum DataBlockEncoding {
/**
* Writes id bytes to the given array starting from offset.
* @param dest output array
- * @param offset starting offset of the output array n
+ * @param offset starting offset of the output array
*/
// System.arraycopy is static native. Nothing we can do this until we have minimum JDK 9.
@SuppressWarnings("UnsafeFinalization")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index 3948aee35ae..68b300ae60f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -181,7 +181,7 @@ public class EncodedDataBlock {
* @param inputBuffer Array to be compressed.
* @param offset Offset to beginning of the data.
* @param length Length to be compressed.
- * @return Size of compressed data in bytes. n
+ * @return Size of compressed data in bytes.
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_EXCEPTION",
justification = "No sure what findbugs wants but looks to me like no NPE")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
index 6835a8bac3c..63f173c38cc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
@@ -32,9 +32,9 @@ public interface HFileBlockDecodingContext {
/**
* Perform all actions that need to be done before the encoder's real decoding process.
* Decompression needs to be done if {@link HFileContext#getCompression()} returns a valid
- * compression algorithm. n * numBytes after block and encoding headers n * numBytes without
- * header required to store the block after decompressing (not decoding) n * ByteBuffer pointed
- * after the header but before the data n * on disk data to be decoded
+ * compression algorithm. The arguments give the numBytes after block and encoding headers, the
+ * numBytes without header required to store the block after decompressing (not decoding), a
+ * ByteBuffer pointed after the header but before the data, and the on disk data to be decoded.
*/
void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
ByteBuff blockBufferWithoutHeader, ByteBuff onDiskBlock) throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
index ed97147ac9b..ad193cad613 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
@@ -91,7 +91,7 @@ public enum IndexBlockEncoding {
/**
* Writes id bytes to the given array starting from offset.
* @param dest output array
- * @param offset starting offset of the output array n
+ * @param offset starting offset of the output array
*/
public void writeIdInBytes(byte[] dest, int offset) throws IOException {
System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 5a61622101b..a2e63b9fda0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -93,7 +93,8 @@ public class ThrottledInputStream extends InputStream {
/**
* Read bytes starting from the specified position. This requires rawStream is an instance of
- * {@link PositionedReadable}. nnnn * @return the number of bytes read
+ * {@link PositionedReadable}.
+ * @return the number of bytes read
*/
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
if (!(rawStream instanceof PositionedReadable)) {
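[Editor's note: the positional read above requires the wrapped stream to be PositionedReadable,
e.g. an FSDataInputStream; a sketch, assuming the two-argument constructor that caps bytes per
second, with an illustrative path.]
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;

public class ThrottledReadSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    FSDataInputStream raw = fs.open(new Path("/tmp/sample.dat")); // illustrative path
    ThrottledInputStream in = new ThrottledInputStream(raw, 1024 * 1024); // assumed ctor, ~1 MB/s
    byte[] buf = new byte[4096];
    int n = in.read(0L, buf, 0, buf.length); // positional read, throttled
    System.out.println(n + " bytes read");
  }
}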
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
index 157df98a9b0..b1ab8a9b28d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
@@ -80,7 +80,7 @@ public interface Dictionary {
* @param data the data to be written in byte[]
* @param offset the offset
* @param length length to be written
- * @param dict the dictionary whose contents are to written n
+ * @param dict the dictionary whose contents are to be written
*/
public static void write(OutputStream out, byte[] data, int offset, int length, Dictionary dict)
throws IOException {
@@ -103,7 +103,7 @@ public interface Dictionary {
* @param data the data to be written in ByteBuffer
* @param offset the offset
* @param length length to be written
- * @param dict the dictionary whose contents are to written n
+ * @param dict the dictionary whose contents are to be written
*/
public static void write(OutputStream out, ByteBuffer data, int offset, int length,
Dictionary dict) throws IOException {
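[Editor's note: the static Dictionary.write helper above, sketched. LRUDictionary as the concrete
implementation and its init(int) sizing are assumptions based on how the codec classes wire it up.]
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.util.Dictionary;
import org.apache.hadoop.hbase.io.util.LRUDictionary;
import org.apache.hadoop.hbase.util.Bytes;

public class DictionaryWriteSketch {
  public static void main(String[] args) throws IOException {
    Dictionary dict = new LRUDictionary(); // assumed implementation
    dict.init(Short.MAX_VALUE);            // assumed capacity call
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] data = Bytes.toBytes("repeated-value");
    Dictionary.write(out, data, 0, data.length, dict); // first write: literal, added to dict
    int afterFirst = out.size();
    Dictionary.write(out, data, 0, data.length, dict); // second write: short dictionary index
    System.out.println((out.size() - afterFirst) + " bytes for the repeat");
  }
}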
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
index 7cfa007478f..97e1e9d3345 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
@@ -118,8 +118,8 @@ public class StreamUtils {
}
/**
- * Reads a varInt value stored in an array. n * Input array where the varInt is available n *
- * Offset in the input array where varInt is available
+ * Reads a varInt value stored in an array, given the input array and the offset in it where the
+ * varInt is available.
* @return A pair of integers in which first value is the actual decoded varInt value and second
* value as number of bytes taken by this varInt for it's storage in the input array.
* @throws IOException When varint is malformed and not able to be read correctly
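[Editor's note: the varInt reader above returns both the decoded value and its encoded width; a
sketch. The method names readRawVarint32/writeRawVInt32 are assumptions about this class's API.]
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.util.StreamUtils;
import org.apache.hadoop.hbase.util.Pair;

public class VarIntSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    StreamUtils.writeRawVInt32(bos, 300); // assumed companion encoder
    Pair<Integer, Integer> p = StreamUtils.readRawVarint32(bos.toByteArray(), 0);
    System.out.println(p.getFirst() + " decoded from " + p.getSecond() + " bytes");
  }
}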
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
index 27eca9479d6..9e77bfcd04b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
@@ -85,7 +85,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
public abstract int position();
/**
- * Sets this ByteBuff's position to the given value. n * @return this object
+ * Sets this ByteBuff's position to the given value.
+ * @return this object
*/
public abstract ByteBuff position(int position);
@@ -184,7 +185,7 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
public abstract byte get();
/**
- * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
+ * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
* @return the byte at the given index
*/
public abstract byte get(int index);
@@ -244,7 +245,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
public abstract ByteBuff put(byte[] src, int offset, int length);
/**
- * Copies from the given byte[] to this ByteBuff n * @return this ByteBuff
+ * Copies from the given byte[] to this ByteBuff
* @param src source byte array
* @return this ByteBuff
*/
@@ -269,14 +271,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
* Fetches the short value at the given index. Does not change position of the underlying
* ByteBuffers. The caller is sure that the index will be after the current position of this
* ByteBuff. So even if the current short does not fit in the current item we can safely move to
- * the next item and fetch the remaining bytes forming the short n * @return the short value at
- * the given index
+ * the next item and fetch the remaining bytes forming the short
+ * @return the short value at the given index
*/
public abstract short getShort(int index);
/**
* Fetches the short value at the given offset from current position. Does not change position of
- * the underlying ByteBuffers. n * @return the short value at the given index.
+ * the underlying ByteBuffers.
+ * @return the short value at the given index.
*/
public abstract short getShortAfterPosition(int offset);
@@ -319,13 +322,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
* Fetches the long at the given index. Does not change position of the underlying ByteBuffers.
* The caller is sure that the index will be after the current position of this ByteBuff. So even
* if the current long does not fit in the current item we can safely move to the next item and
- * fetch the remaining bytes forming the long n * @return the long value at the given index
+ * fetch the remaining bytes forming the long
+ * @return the long value at the given index
*/
public abstract long getLong(int index);
/**
* Fetches the long value at the given offset from current position. Does not change position of
- * the underlying ByteBuffers. n * @return the long value at the given index.
+ * the underlying ByteBuffers.
+ * @return the long value at the given index.
*/
public abstract long getLongAfterPosition(int offset);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
index c55ee021bd0..ddd567eb4b9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
@@ -149,8 +149,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the byte at the given index
+ * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
+ * @return the byte at the given index
*/
@Override
public byte get(int index) {
@@ -201,8 +201,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Fetches the int at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the int value at the given index
+ * Fetches the int at the given index. Does not change position of the underlying ByteBuffers
+ * @return the int value at the given index
*/
@Override
public int getInt(int index) {
@@ -235,8 +235,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Fetches the short at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the short value at the given index
+ * Fetches the short at the given index. Does not change position of the underlying ByteBuffers
+ * @return the short value at the given index
*/
@Override
public short getShort(int index) {
@@ -347,8 +347,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Fetches the long at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the long value at the given index
+ * Fetches the long at the given index. Does not change position of the underlying ByteBuffers
+ * @return the long value at the given index
*/
@Override
public long getLong(int index) {
@@ -388,7 +388,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Sets this MBB's position to the given value. n * @return this object
+ * Sets this MBB's position to the given value.
+ * @return this object
*/
@Override
public MultiByteBuff position(int position) {
@@ -569,7 +570,7 @@ public class MultiByteBuff extends ByteBuff {
/**
* Copies the content from this MBB's current position to the byte array and fills it. Also
- * advances the position of the MBB by the length of the byte[]. n
+ * advances the position of the MBB by the length of the byte[].
*/
@Override
public void get(byte[] dst) {
@@ -615,7 +616,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Marks the limit of this MBB. n * @return This MBB
+ * Marks the limit of this MBB.
+ * @return This MBB
*/
@Override
public MultiByteBuff limit(int limit) {
@@ -686,8 +688,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Writes a byte to this MBB at the current position and increments the position n * @return this
- * object
+ * Writes a byte to this MBB at the current position and increments the position
+ * @return this object
*/
@Override
public MultiByteBuff put(byte b) {
@@ -960,7 +962,7 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Jumps the current position of this MBB by specified length. n
+ * Jumps the current position of this MBB by specified length.
*/
@Override
public MultiByteBuff skip(int length) {
@@ -982,7 +984,7 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Jumps back the current position of this MBB by specified length. n
+ * Jumps back the current position of this MBB by specified length.
*/
@Override
public MultiByteBuff moveBack(int length) {
@@ -1109,8 +1111,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Copy the content from this MBB to a byte[] based on the given offset and length n * the
- * position from where the copy should start n * the length upto which the copy has to be done
+ * Copy the content from this MBB to a byte[] based on the given offset, i.e. the position from
+ * where the copy should start, and the given length, up to which the copy has to be done
* @return byte[] with the copied contents from this MBB.
*/
@Override
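[Editor's note: the index-based getters above do not move the read position, unlike the relative
ones; a sketch over two backing fragments.]
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.MultiByteBuff;

public class MultiByteBuffSketch {
  public static void main(String[] args) {
    MultiByteBuff buf = new MultiByteBuff(ByteBuffer.allocate(4), ByteBuffer.allocate(4));
    buf.put((byte) 7).put((byte) 8);    // relative puts advance the position
    System.out.println(buf.get(0));     // 7: absolute get leaves position untouched
    System.out.println(buf.position()); // still 2
  }
}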
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
index 9ef9e2ddc17..e2cac4b6b56 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
@@ -179,7 +179,7 @@ public abstract class User {
/**
* Wraps an underlying {@code UserGroupInformation} instance.
- * @param ugi The base Hadoop user n
+ * @param ugi The base Hadoop user
*/
public static User create(UserGroupInformation ugi) {
if (ugi == null) {
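[Editor's note: the UGI-wrapping factory above in a minimal sketch.]
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.security.UserGroupInformation;

public class UserWrapSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    User user = User.create(ugi); // wraps the base Hadoop user
    System.out.println(user.getName());
  }
}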
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
index fcf6cc64896..436b5bbc69a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
@@ -181,7 +181,7 @@ public class UserProvider extends BaseConfigurable {
/**
* Wraps an underlying {@code UserGroupInformation} instance.
- * @param ugi The base Hadoop user n
+ * @param ugi The base Hadoop user
*/
public User create(UserGroupInformation ugi) {
if (ugi == null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index 179074ef00c..88ee9c9666a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -69,7 +69,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange
/**
* Update the beginning of this range. {@code offset + length} may not be greater than
- * {@code bytes.length}. Resets {@code position} to 0. n * the new start of this range.
+ * {@code bytes.length}. Resets {@code position} to 0. The argument is the new start of this range.
* @return this.
*/
@Override
@@ -82,7 +82,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange
/**
* Update the length of this range. {@code offset + length} should not be greater than
* {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
- * {@code position} to {@code length}. n * The new length of this range.
+ * {@code position} to {@code length}. The argument is the new length of this range.
* @return this.
*/
@Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
index 32c6779bc04..be1868b70d7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
@@ -865,7 +865,7 @@ public final class ByteBufferUtils {
}
/**
- * n * ByteBuffer to hash n * offset to start from n * length to hash
+ * Computes a hash code for the given ByteBuffer, from the given offset, over the given length.
*/
public static int hashCode(ByteBuffer buf, int offset, int length) {
int hash = 1;
@@ -980,7 +980,7 @@ public final class ByteBufferUtils {
* @param buf The ByteBuffer
* @param offset Offset to int value
* @param length Number of bytes used to store the int value.
- * @return the int value n * if there's not enough bytes left in the buffer after the given offset
+ * @return the int value; fails if there's not enough bytes left in the buffer after the given
+ *         offset
*/
public static int readAsInt(ByteBuffer buf, int offset, final int length) {
if (offset + length > buf.limit()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
index 64bd5cb3b6c..4addf9057e2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
@@ -63,14 +63,13 @@ public interface ByteRange extends Comparable<ByteRange> {
/**
* Nullifies this ByteRange. That is, it becomes a husk, being a range over no byte[] whatsoever.
- * n
*/
public ByteRange unset();
/**
* Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
* is set to {@code capacity}.
- * @param capacity the size of a new byte[]. n
+ * @param capacity the size of a new byte[].
*/
public ByteRange set(int capacity);
@@ -78,7 +77,7 @@ public interface ByteRange extends Comparable<ByteRange> {
* Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
* is set to {@code bytes.length}. A null {@code bytes} IS supported, in which case this method
* will behave equivalently to {@link #unset()}.
- * @param bytes the array to wrap. n
+ * @param bytes the array to wrap.
*/
public ByteRange set(byte[] bytes);
@@ -188,21 +187,21 @@ public interface ByteRange extends Comparable<ByteRange> {
/**
* Store the short value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putShort(int index, short val);
/**
* Store the int value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putInt(int index, int val);
/**
* Store the long value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putLong(int index, long val);
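A minimal sketch of the put* chaining (illustration only; SimpleMutableByteRange appears later in this patch, and the matching get* accessors are assumed to exist on ByteRange):
  import org.apache.hadoop.hbase.util.ByteRange;
  import org.apache.hadoop.hbase.util.SimpleMutableByteRange;
  public class ByteRangePutSketch {
    public static void main(String[] args) {
      ByteRange r = new SimpleMutableByteRange(new byte[16]);
      r.putInt(0, 42).putLong(4, 123L);  // each put* returns this, so calls chain
      System.out.println(r.getInt(0));   // 42 (getInt assumed to mirror putInt)
      System.out.println(r.getLong(4));  // 123
    }
  }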
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index d6662506040..0203cc390fe 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -231,8 +231,9 @@ public class Bytes implements Comparable<Bytes> {
}
/**
- * Compares the bytes in this object to the specified byte array n * @return Positive if left is
- * bigger than right, 0 if they are equal, and negative if left is smaller than right.
+ * Compares the bytes in this object to the specified byte array
+ * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+ * smaller than right.
*/
public int compareTo(final byte[] that) {
return BYTES_RAWCOMPARATOR.compare(this.bytes, this.offset, this.length, that, 0, that.length);
@@ -534,7 +535,8 @@ public class Bytes implements Comparable<Bytes> {
/**
* Write a printable representation of a byte array.
- * @param b byte array n * @see #toStringBinary(byte[], int, int)
+ * @param b byte array
+ * @see #toStringBinary(byte[], int, int)
*/
public static String toStringBinary(final byte[] b) {
if (b == null) return "null";
@@ -2055,7 +2057,7 @@ public class Bytes implements Comparable<Bytes> {
* Copy the byte array given in parameter and return an instance of a new byte array with the same
* length and the same content.
* @param bytes the byte array to copy from
- * @return a copy of the given designated byte array nn
+ * @return a copy of the given designated byte array
*/
public static byte[] copy(byte[] bytes, final int offset, final int length) {
if (bytes == null) return null;
@@ -2236,7 +2238,7 @@ public class Bytes implements Comparable<Bytes> {
}
/**
- * Fill given array with zeros at the specified position. nnn
+ * Fill given array with zeros at the specified position.
*/
public static void zero(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset");
@@ -2319,7 +2321,8 @@ public class Bytes implements Comparable<Bytes> {
}
/**
- * Create a byte array which is multiple given bytes nn * @return byte array
+ * Create a byte array which is multiple given bytes
+ * @return byte array
*/
public static byte[] multiple(byte[] srcBytes, int multiNum) {
if (multiNum <= 0) {
@@ -2374,7 +2377,7 @@ public class Bytes implements Comparable<Bytes> {
/**
* Create a byte array from a string of hash digits. The length of the string must be a multiple
- * of 2 n
+ * of 2
*/
public static byte[] fromHex(String hex) {
checkArgument(hex.length() % 2 == 0, "length must be a multiple of 2");
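A minimal sketch tying together the Bytes helpers touched above (illustration only; the \x-escaped output format of toStringBinary is an assumption):
  import org.apache.hadoop.hbase.util.Bytes;
  public class BytesHelpersSketch {
    public static void main(String[] args) {
      byte[] b = Bytes.fromHex("0a0b");       // {0x0a, 0x0b}; length must be even
      byte[] tripled = Bytes.multiple(b, 3);  // b repeated 3 times, 6 bytes total
      // non-printable bytes are expected to render as \x0A\x0B...
      System.out.println(Bytes.toStringBinary(tripled));
    }
  }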
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
index d943803fb2f..dc810834a66 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
@@ -85,8 +85,8 @@ public enum ChecksumType {
}
/**
- * Cannot rely on enum ordinals . They change if item is removed or moved. Do our own codes. n
- * * @return Type associated with passed code.
+ * Cannot rely on enum ordinals. They change if an item is removed or moved. Do our own codes.
+ * @return Type associated with passed code.
*/
public static ChecksumType codeToType(final byte b) {
for (ChecksumType t : ChecksumType.values()) {
@@ -98,8 +98,8 @@ public enum ChecksumType {
}
/**
- * Map a checksum name to a specific type. Do our own names. n * @return Type associated with
- * passed code.
+ * Map a checksum name to a specific type. Do our own names.
+ * @return Type associated with passed name.
*/
public static ChecksumType nameToType(final String name) {
for (ChecksumType t : ChecksumType.values()) {
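A minimal sketch of the code/name round-trip these lookups provide (illustration only; getCode() is assumed to be the accessor for the stable code):
  import org.apache.hadoop.hbase.util.ChecksumType;
  public class ChecksumTypeSketch {
    public static void main(String[] args) {
      ChecksumType t = ChecksumType.nameToType("CRC32C");  // look up by name
      byte code = t.getCode();                             // stable code, unlike ordinal()
      System.out.println(ChecksumType.codeToType(code) == t); // true: code round-trips
    }
  }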
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
index 84e70873727..1b3eef180a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
@@ -27,10 +27,9 @@ public class Classes {
/**
* Equivalent of {@link Class#forName(String)} which also returns classes for primitives like
- * <code>boolean</code>, etc. n * The name of the class to retrieve. Can be either a normal class
- * or a primitive class.
- * @return The class specified by <code>className</code> n * If the requested class can not be
- * found.
+ * <code>boolean</code>, etc. The argument is the name of the class to retrieve, either a normal
+ * class or a primitive class.
+ * @return The class specified by <code>className</code>
+ * @throws ClassNotFoundException if the requested class can not be found.
*/
public static Class<?> extendedForName(String className) throws ClassNotFoundException {
Class<?> valueType;
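A minimal sketch of the primitive-aware lookup (illustration only): plain Class.forName("boolean") throws, which is the gap this helper fills:
  import org.apache.hadoop.hbase.util.Classes;
  public class ExtendedForNameSketch {
    public static void main(String[] args) throws ClassNotFoundException {
      Class<?> prim = Classes.extendedForName("boolean");  // boolean.class
      Class<?> normal = Classes.extendedForName("java.lang.String");
      System.out.println(prim + " / " + normal);
      // an unknown name throws ClassNotFoundException, per the repaired javadoc
    }
  }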
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index ca8d27d8eeb..80076495456 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -328,7 +328,7 @@ public final class CommonFSUtils {
* Returns the URI in the string format
* @param c configuration
* @param p path
- * @return - the URI's to string format n
+ * @return the URI in string format
*/
public static String getDirUri(final Configuration c, Path p) throws IOException {
if (p.toUri().getScheme() != null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
index 531d12085fe..a5e6a65efc9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
@@ -215,7 +215,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
* @param parent the parent class loader for exempted classes
* @param pathPrefix a prefix used in temp path name to store the jar file locally
* @param conf the configuration used to create the class loader, if needed
- * @return a CoprocessorClassLoader for the coprocessor jar path n
+ * @return a CoprocessorClassLoader for the coprocessor jar path
*/
public static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent,
final String pathPrefix, final Configuration conf) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
index 276e436ed13..0cd1b41c502 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
@@ -58,7 +58,7 @@ public class KeyLocker<K> {
}, NB_CONCURRENT_LOCKS);
/**
- * Return a lock for the given key. The lock is already locked. n
+ * Return a lock for the given key. The lock is already locked.
*/
public ReentrantLock acquireLock(K key) {
if (key == null) throw new IllegalArgumentException("key must not be null");
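A minimal sketch of the acquireLock contract (illustration only): the returned lock is already held, so the caller only unlocks:
  import java.util.concurrent.locks.ReentrantLock;
  import org.apache.hadoop.hbase.util.KeyLocker;
  public class KeyLockerSketch {
    public static void main(String[] args) {
      KeyLocker<String> locker = new KeyLocker<>();
      ReentrantLock lock = locker.acquireLock("row-1"); // returned in the locked state
      try {
        // critical section serialized per key
      } finally {
        lock.unlock(); // caller releases; do not lock() again
      }
    }
  }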
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
index d967f5d53a7..7e143e15de2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
@@ -29,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience;
public class MD5Hash {
/**
- * Given a byte array, returns in MD5 hash as a hex string. n * @return SHA1 hash as a 32
- * character hex string.
+ * Given a byte array, returns its MD5 hash as a hex string.
+ * @return MD5 hash as a 32 character hex string.
*/
public static String getMD5AsHex(byte[] key) {
return getMD5AsHex(key, 0, key.length);
@@ -39,8 +39,8 @@ public class MD5Hash {
/**
* Given a byte array, returns its MD5 hash as a hex string. Only "length" number of bytes
* starting at "offset" within the byte array are used.
- * @param key the key to hash (variable length byte array) nn * @return MD5 hash as a 32 character
- * hex string.
+ * @param key the key to hash (variable length byte array)
+ * @return MD5 hash as a 32 character hex string.
*/
public static String getMD5AsHex(byte[] key, int offset, int length) {
try {
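A minimal sketch of the corrected contract, i.e. an MD5 (not SHA1) hex digest over a whole array and a slice (illustration only):
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.hbase.util.MD5Hash;
  public class MD5HashSketch {
    public static void main(String[] args) {
      byte[] key = Bytes.toBytes("some-row-key");
      String whole = MD5Hash.getMD5AsHex(key);        // 32 hex characters
      String slice = MD5Hash.getMD5AsHex(key, 0, 4);  // MD5 of the first 4 bytes only
      System.out.println(whole + " / " + slice);
    }
  }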
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
index dd8eb4f1858..fe8d111dfbe 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
@@ -74,14 +74,14 @@ public class Pair<T1, T2> implements Serializable {
}
/**
- * Return the first element stored in the pair. n
+ * Return the first element stored in the pair.
*/
public T1 getFirst() {
return first;
}
/**
- * Return the second element stored in the pair. n
+ * Return the second element stored in the pair.
*/
public T2 getSecond() {
return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
index 44bc2b81dc0..ef44fc4e043 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
@@ -42,14 +42,14 @@ public class PairOfSameType<T> implements Iterable<T> {
}
/**
- * Return the first element stored in the pair. n
+ * Return the first element stored in the pair.
*/
public T getFirst() {
return first;
}
/**
- * Return the second element stored in the pair. n
+ * Return the second element stored in the pair.
*/
public T getSecond() {
return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
index efa52612be6..cb61cfbe246 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
@@ -156,12 +156,12 @@ public interface PositionedByteRange extends ByteRange {
public PositionedByteRange put(byte[] val, int offset, int length);
/**
- * Limits the byte range upto a specified value. Limit cannot be greater than capacity nn
+ * Limits the byte range up to a specified value. The limit cannot be greater than the capacity.
*/
public PositionedByteRange setLimit(int limit);
/**
- * Return the current limit n
+ * Return the current limit
*/
public int getLimit();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index c3d4d82f6bd..f73064f70a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -77,8 +77,8 @@ public final class PrettyPrinter {
/**
* Convert a human readable string to its value.
- * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit) nn * @return the value
- * corresponding to the human readable string
+ * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
+ * @return the value corresponding to the human readable string
*/
public static String valueOf(final String pretty, final Unit unit) throws HBaseException {
StringBuilder value = new StringBuilder();
@@ -155,7 +155,8 @@ public final class PrettyPrinter {
* Convert a human readable time interval to seconds. Examples of the human readable time
* intervals are: 50 DAYS 1 HOUR 30 MINUTES , 25000 SECONDS etc. The units of time specified can
* be in uppercase as well as lowercase. Also, if a single number is specified without any time
- * unit, it is assumed to be in seconds. n * @return value in seconds
+ * unit, it is assumed to be in seconds.
+ * @return value in seconds
*/
private static long humanReadableIntervalToSec(final String humanReadableInterval)
throws HBaseException {
@@ -261,7 +262,7 @@ public final class PrettyPrinter {
* KB , 25000 B etc. The units of size specified can be in uppercase as well as lowercase. Also,
* if a single number is specified without any time unit, it is assumed to be in bytes.
* @param humanReadableSize human readable size
- * @return value in bytes n
+ * @return value in bytes
*/
private static long humanReadableSizeToBytes(final String humanReadableSize)
throws HBaseException {
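A minimal sketch through the public valueOf entry point (illustration only; Unit.TIME_INTERVAL and the exact string returned are assumptions based on the javadoc):
  import org.apache.hadoop.hbase.util.PrettyPrinter;
  import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
  public class PrettyPrinterSketch {
    public static void main(String[] args) throws Exception {
      // "1 HOUR 30 MINUTES" -> seconds; a bare number would be taken as seconds already
      String secs = PrettyPrinter.valueOf("1 HOUR 30 MINUTES", Unit.TIME_INTERVAL);
      System.out.println(secs); // expected "5400"
    }
  }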
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
index 24b9f2d997b..868c731e0a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
@@ -66,22 +66,22 @@ public class SimpleMutableByteRange extends AbstractByteRange {
/**
* Create a new {@code ByteRange} over a new backing array of size {@code capacity}. The range's
- * offset and length are 0 and {@code capacity}, respectively. n * the size of the backing array.
+ * offset and length are 0 and {@code capacity}, respectively.
*/
public SimpleMutableByteRange(int capacity) {
this(new byte[capacity]);
}
/**
- * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap.
+ * Create a new {@code ByteRange} over the provided {@code bytes}, the array to wrap.
*/
public SimpleMutableByteRange(byte[] bytes) {
set(bytes);
}
/**
- * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap. n * The
- * offset into {@code bytes} considered the beginning of this range. n * The length of this range.
+ * Create a new {@code ByteRange} over the provided {@code bytes}: the array to wrap, the offset
+ * into {@code bytes} considered the beginning of this range, and the length of this range.
*/
public SimpleMutableByteRange(byte[] bytes, int offset, int length) {
set(bytes, offset, length);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
index d91fd712f37..68e99c3053b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
@@ -70,7 +70,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
/**
* Create a new {@code PositionedByteRange} over a new backing array of size {@code capacity}. The
- * range's offset and length are 0 and {@code capacity}, respectively. n * the size of the backing
+ * range's offset and length are 0 and {@code capacity}, respectively; the argument gives the size of the backing
* array.
*/
public SimplePositionedMutableByteRange(int capacity) {
@@ -78,17 +78,15 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
}
/**
- * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
- * wrap.
+ * Create a new {@code PositionedByteRange} over the provided {@code bytes}, the array to wrap.
*/
public SimplePositionedMutableByteRange(byte[] bytes) {
set(bytes);
}
/**
- * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
- * wrap. n * The offset into {@code bytes} considered the beginning of this range. n * The length
- * of this range.
+ * Create a new {@code PositionedByteRange} over the provided {@code bytes}: the array to wrap,
+ * the offset into {@code bytes} considered the beginning of this range, and the length of this range.
*/
public SimplePositionedMutableByteRange(byte[] bytes, int offset, int length) {
set(bytes, offset, length);
@@ -130,7 +128,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
/**
* Update the beginning of this range. {@code offset + length} may not be greater than
- * {@code bytes.length}. Resets {@code position} to 0. n * the new start of this range.
+ * {@code bytes.length}. Resets {@code position} to 0. The argument is the new start of this range.
* @return this.
*/
@Override
@@ -143,7 +141,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
/**
* Update the length of this range. {@code offset + length} should not be greater than
* {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
- * {@code position} to {@code length}. n * The new length of this range.
+ * {@code position} to {@code length}. The argument is the new length of this range.
* @return this.
*/
@Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
index 0caecf649ce..e23c62045fa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface TimeMeasurable<T> {
/**
- * Measure elapsed time. n
+ * Measure elapsed time.
*/
T measure();
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
index 48b60a49616..3aa8a6ec123 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
@@ -193,7 +193,7 @@ public final class UnsafeAccess {
/**
* Reads a int value at the given Object's offset considering it was written in big-endian format.
- * nn * @return int value at offset
+ * @return int value at offset
*/
public static int toInt(Object ref, long offset) {
if (LITTLE_ENDIAN) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
index 154bc0e42db..2c600e3c5fd 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
@@ -73,7 +73,7 @@ public class WindowMovingAverage<T> extends MovingAverage<T> {
/**
* Get statistics at index.
- * @param index index of bar n
+ * @param index index of bar
*/
protected long getStatisticsAtIndex(int index) {
if (index < 0 || index >= getNumberOfStatistics()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
index de0cbdfa918..32cfde410d5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
@@ -106,8 +106,8 @@ public final class ZKConfig {
}
/**
- * Return the ZK Quorum servers string given the specified configuration n * @return Quorum
- * servers String
+ * Return the ZK Quorum servers string given the specified configuration
+ * @return Quorum servers String
*/
private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) {
String defaultClientPort = Integer.toString(
@@ -191,8 +191,8 @@ public final class ZKConfig {
/**
* Separate the given key into the three configurations it should contain: hbase.zookeeper.quorum,
- * hbase.zookeeper.client.port and zookeeper.znode.parent n * @return the three configuration in
- * the described order n
+ * hbase.zookeeper.client.port and zookeeper.znode.parent
+ * @return the three configurations in the described order
*/
public static ZKClusterKey transformClusterKey(String key) throws IOException {
List<String> parts = Splitter.on(':').splitToList(key);
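A minimal sketch of the cluster-key split (illustration only; the quorum:port:znodeParent format follows the javadoc, and nothing beyond the call itself is asserted API):
  import org.apache.hadoop.hbase.zookeeper.ZKConfig;
  public class ClusterKeySketch {
    public static void main(String[] args) throws Exception {
      // splits into the three documented parts, in order:
      // quorum "zk1,zk2,zk3", client port 2181, znode parent "/hbase"
      ZKConfig.ZKClusterKey key = ZKConfig.transformClusterKey("zk1,zk2,zk3:2181:/hbase");
      System.out.println(key);
      // a malformed key (wrong number of ':' parts) raises the IOException declared above
    }
  }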
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6bb93c09c09..e959f77a722 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -258,7 +258,7 @@ public class TestHBaseConfiguration {
}
/**
- * Wrapper to fetch the configured {@code List<CredentialProvider>}s. n * Configuration with
+ * Wrapper to fetch the configured {@code List<CredentialProvider>}s, given a Configuration with
* GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS defined
* @return List of CredentialProviders, or null if they could not be loaded
*/
@@ -283,8 +283,8 @@ public class TestHBaseConfiguration {
/**
* Create a CredentialEntry using the configured Providers. If multiple CredentialProviders are
- * configured, the first will be used. n * Configuration for the CredentialProvider n *
- * CredentialEntry name (alias) n * The credential
+ * configured, the first will be used. Arguments: the Configuration for the CredentialProvider,
+ * the CredentialEntry name (alias), and the credential.
*/
public void createEntry(Configuration conf, String name, char[] credential) throws Exception {
if (!isHadoopCredentialProviderAvailable()) {
@@ -303,8 +303,8 @@ public class TestHBaseConfiguration {
/**
* Create a CredentialEntry with the give name and credential in the credentialProvider. The
- * credentialProvider argument must be an instance of Hadoop CredentialProvider. n * Instance of
- * CredentialProvider n * CredentialEntry name (alias) n * The credential to store
+ * credentialProvider argument must be an instance of Hadoop CredentialProvider. Arguments: the
+ * CredentialProvider instance, the CredentialEntry name (alias), and the credential to store.
*/
private void createEntryInProvider(Object credentialProvider, String name, char[] credential)
throws Exception {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
index 583e7efcfa9..0185ebff0ec 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
@@ -409,7 +409,7 @@ public final class X509TestContext {
* circumstances to inject a "bad" certificate where the keystore doesn't match the CA in the
* truststore. Or use it to create a connection without a truststore.
* @see #setConfigurations(KeyStoreFileType, KeyStoreFileType) which sets both keystore and
- * truststore and is more applicable to general use. nnn
+ * truststore and is more applicable to general use.
*/
public void setKeystoreConfigurations(KeyStoreFileType keyStoreFileType, Configuration confToSet)
throws IOException {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
index 56d3c8cb859..78d70f8f581 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
@@ -371,7 +371,7 @@ final class X509TestHelpers {
* @param cert the certificate to serialize.
* @param keyPassword an optional password to encrypt the trust store. If empty or null, the cert
* will not be encrypted.
- * @return the serialized bytes of the BCFKS trust store. nn
+ * @return the serialized bytes of the BCFKS trust store.
*/
public static byte[] certToBCFKSTrustStoreBytes(X509Certificate cert, char[] keyPassword)
throws IOException, GeneralSecurityException {
@@ -434,7 +434,7 @@ final class X509TestHelpers {
* @param privateKey the private key to serialize.
* @param keyPassword an optional key password. If empty or null, the private key will not be
* encrypted.
- * @return the serialized bytes of the BCFKS key store. nn
+ * @return the serialized bytes of the BCFKS key store.
*/
public static byte[] certAndPrivateKeyToBCFKSBytes(X509Certificate cert, PrivateKey privateKey,
char[] keyPassword) throws IOException, GeneralSecurityException {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
index b07a924a4e3..6635accedbb 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
@@ -51,7 +51,7 @@ public class RandomDistribution {
/**
* Generate random integers from min (inclusive) to max (exclusive) following even distribution.
- * n * The basic random number generator. n * Minimum integer n * maximum integer (exclusive).
+ * Arguments: the basic random number generator, the minimum integer (inclusive), and the maximum integer (exclusive).
*/
public Flat(Random random, int min, int max) {
if (min >= max) {
@@ -82,17 +82,16 @@ public class RandomDistribution {
private final ArrayList<Double> v;
/**
- * Constructor n * The random number generator. n * minimum integer (inclusvie) n * maximum
- * integer (exclusive) n * parameter sigma. (sigma > 1.0)
+ * Constructor. Arguments: the random number generator, the minimum integer (inclusive), the
+ * maximum integer (exclusive), and the parameter sigma (sigma > 1.0).
*/
public Zipf(Random r, int min, int max, double sigma) {
this(r, min, max, sigma, DEFAULT_EPSILON);
}
/**
- * Constructor. n * The random number generator. n * minimum integer (inclusvie) n * maximum
- * integer (exclusive) n * parameter sigma. (sigma > 1.0) n * Allowable error percentage (0 <
- * epsilon < 1.0).
+ * Constructor. Arguments: the random number generator, the minimum integer (inclusive), the
+ * maximum integer (exclusive), the parameter sigma (sigma > 1.0), and the allowable error
+ * percentage (0 < epsilon < 1.0).
*/
public Zipf(Random r, int min, int max, double sigma, double epsilon) {
if ((max <= min) || (sigma <= 1) || (epsilon <= 0) || (epsilon >= 0.5)) {
@@ -178,8 +177,8 @@ public class RandomDistribution {
/**
* Generate random integers from min (inclusive) to max (exclusive) following Binomial
- * distribution. n * The basic random number generator. n * Minimum integer n * maximum integer
- * (exclusive). n * parameter.
+ * distribution. Arguments: the basic random number generator, the minimum integer (inclusive),
+ * the maximum integer (exclusive), and the parameter p.
*/
public Binomial(Random random, int min, int max, double p) {
if (min >= max) {
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
index f2d3f63dbcb..fd3cfcc8b87 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
@@ -68,7 +68,7 @@ public class MetricSampleQuantiles {
/**
* Specifies the allowable error for this rank, depending on which quantiles are being targeted.
* This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this
- * rank can be. n * the index in the list of samples
+ * rank can be. The argument is the index in the list of samples.
*/
private double allowableError(int rank) {
int size = samples.size();
@@ -208,7 +208,7 @@ public class MetricSampleQuantiles {
/**
* Get a snapshot of the current values of all the tracked quantiles.
- * @return snapshot of the tracked quantiles n * if no items have been added to the estimator
+ * @return snapshot of the tracked quantiles
+ * @throws IOException if no items have been added to the estimator
*/
synchronized public Map<MetricQuantile, Long> snapshot() throws IOException {
// flush the buffer first for best results
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 50cefc4c39a..ce1b387bc15 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -252,10 +252,10 @@ public class HttpServer implements FilterContainer {
private int port = -1;
/**
- * Add an endpoint that the HTTP server should listen to. n * the endpoint of that the HTTP
- * server should listen to. The scheme specifies the protocol (i.e. HTTP / HTTPS), the host
- * specifies the binding address, and the port specifies the listening port. Unspecified or zero
- * port means that the server can listen to any port.
+ * Add an endpoint that the HTTP server should listen to. The argument is the endpoint; the
+ * scheme specifies the protocol (i.e. HTTP / HTTPS), the host specifies the binding address,
+ * and the port specifies the listening port. Unspecified or zero port means that the server
+ * can listen to any port.
*/
public Builder addEndpoint(URI endpoint) {
endpoints.add(endpoint);
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
index c8456a461bb..494a30c3e77 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -141,7 +141,7 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter {
/**
* The purpose of this function is to get the doAs parameter of a http request case insensitively
- * n * @return doAs parameter if exists or null otherwise
+ * @return the doAs parameter if it exists, or null otherwise
*/
public static String getDoasFromHeader(final HttpServletRequest request) {
String doas = null;
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index d393187b1e4..978de8530ef 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -146,8 +146,8 @@ public class JMXJsonServlet extends HttpServlet {
}
/**
- * Process a GET request for the specified resource. n * The servlet request we are processing n *
- * The servlet response we are creating
+ * Process a GET request for the specified resource, given the servlet request we are processing
+ * and the servlet response we are creating.
*/
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
index 64119ec5095..cc6a99bd300 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
@@ -112,7 +112,7 @@ public final class JSONMetricUtil {
* Method for building map used for constructing ObjectName. Mapping is done with arrays indices
* @param keys Map keys
* @param values Map values
- * @return Map or null if arrays are empty * or have different number of elements
+ * @return Map or null if arrays are empty or have different number of elements
*/
@SuppressWarnings("JdkObsolete") // javax requires hashtable param for ObjectName constructor
public static Hashtable<String, String> buldKeyValueTable(String[] keys, String[] values) {
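A minimal sketch of the key/value pairing feeding a JMX ObjectName, which is why the javax Hashtable is required (illustration only; the java.lang:type=Memory target is just an example):
  import java.util.Hashtable;
  import javax.management.ObjectName;
  import org.apache.hadoop.hbase.util.JSONMetricUtil;
  public class BuldKeyValueTableSketch {
    public static void main(String[] args) throws Exception {
      Hashtable<String, String> props =
        JSONMetricUtil.buldKeyValueTable(new String[] { "type" }, new String[] { "Memory" });
      ObjectName on = new ObjectName("java.lang", props); // i.e. "java.lang:type=Memory"
      System.out.println(on);
      // mismatched or empty arrays make buldKeyValueTable return null, per the javadoc
    }
  }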
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index ae7fef86500..314a70acf12 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -78,7 +78,7 @@ public class DistributedHBaseCluster extends HBaseClusterInterface {
}
/**
- * Returns a ClusterStatus for this HBase cluster n
+ * Returns a ClusterStatus for this HBase cluster
*/
@Override
public ClusterMetrics getClusterMetrics() throws IOException {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 7223a1f753e..4da9244a6cd 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -663,7 +663,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
}
/**
- * After adding data to the table start a mr job to nnn
+ * After adding data to the table, start a MR job to run the check.
*/
private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
LOG.info("Running check");
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 068eb574659..0e259f5072a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -1159,8 +1159,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
}
/**
- * nn * @return Return new byte array that has <code>ordinal</code> as prefix on front taking
- * up Bytes.SIZEOF_SHORT bytes followed by <code>r</code>
+ * Returns new byte array that has <code>ordinal</code> as prefix on front taking up
+ * Bytes.SIZEOF_SHORT bytes followed by <code>r</code>
*/
public static byte[] addPrefixFlag(final int ordinal, final byte[] r) {
byte[] prefix = Bytes.toBytes((short) ordinal);
@@ -1174,7 +1174,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
}
/**
- * n * @return Type from the Counts enum of this row. Reads prefix added by
+ * Returns type from the Counts enum of this row. Reads prefix added by
* {@link #addPrefixFlag(int, byte[])}
*/
public static VerifyCounts whichType(final byte[] bs) {
@@ -1182,9 +1182,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
return VerifyCounts.values()[ordinal];
}
- /**
- * n * @return Row bytes minus the type flag.
- */
+ /** Returns Row bytes minus the type flag. */
public static byte[] getRowOnly(BytesWritable bw) {
byte[] bytes = new byte[bw.getLength() - Bytes.SIZEOF_SHORT];
System.arraycopy(bw.getBytes(), Bytes.SIZEOF_SHORT, bytes, 0, bytes.length);
@@ -1273,7 +1271,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
/**
* Dump out extra info around references if there are any. Helps debugging.
- * @return StringBuilder filled with references if any. n
+ * @return StringBuilder filled with references if any.
*/
@SuppressWarnings("JavaUtilDate")
private StringBuilder dumpExtraInfoOnRefs(final BytesWritable key, final Context context,
@@ -1425,8 +1423,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
}
/**
- * Verify the values in the Counters against the expected number of entries written. n *
- * Expected number of referenced entrires n * The Job's Counters object
+ * Verify the values in the Counters against the expected number of entries written. Arguments:
+ * the expected number of referenced entries and the Job's Counters object.
* @return True if the values match what's expected, false otherwise
*/
protected boolean verifyExpectedValues(long expectedReferenced, Counters counters) {
@@ -1454,7 +1452,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
/**
* Verify that the Counters don't contain values which indicate an outright failure from the
- * Reducers. n * The Job's counters
+ * Reducers, given the Job's counters.
* @return True if the "bad" counter objects are 0, false otherwise
*/
protected boolean verifyUnexpectedValues(Counters counters) {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
index 7bf4b4a95af..451e2d760f3 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
@@ -185,7 +185,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
/**
* This tears down any tables that existed from before and rebuilds the tables and schemas on
* the source cluster. It then sets up replication from the source to the sink cluster by using
- * the {@link org.apache.hadoop.hbase.client.Admin} connection. n
+ * the {@link org.apache.hadoop.hbase.client.Admin} connection.
*/
protected void setupTablesAndReplication() throws Exception {
TableName tableName = getTableName(source.getConfiguration());
@@ -261,7 +261,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
/**
* Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator} in the
- * source cluster. This assumes that the tables have been setup via setupTablesAndReplication. n
+ * source cluster. This assumes that the tables have been setup via setupTablesAndReplication.
*/
protected void runGenerator() throws Exception {
Path outputPath = new Path(outputDir);
@@ -282,7 +282,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
* Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Verify} in the sink
* cluster. If replication is working properly the data written at the source cluster should be
* available in the sink cluster after a reasonable gap
- * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster n
+ * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster
*/
protected void runVerify(long expectedNumNodes) throws Exception {
Path outputPath = new Path(outputDir);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
index 60e24be5128..63dc0bb28c8 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
@@ -36,9 +36,6 @@ public class Driver {
pgd = pgd0;
}
- /**
- * nn
- */
public static void main(String[] args) throws Throwable {
pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table");
ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
index 3d609ffd73b..58d8f49839f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
@@ -74,7 +74,7 @@ public class GroupingTableMap extends MapReduceBase
/**
* Extract the grouping columns from value to construct a new key. Pass the new key and value to
- * reduce. If any of the grouping columns are not found in the value, the record is skipped. nnnnn
+ * reduce. If any of the grouping columns are not found in the value, the record is skipped.
*/
public void map(ImmutableBytesWritable key, Result value,
OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter) throws IOException {
@@ -88,8 +88,8 @@ public class GroupingTableMap extends MapReduceBase
/**
* Extract columns values from the current record. This method returns null if any of the columns
- * are not found. Override this method if you want to deal with nulls differently. n * @return
- * array of byte values
+ * are not found. Override this method if you want to deal with nulls differently.
+ * @return array of byte values
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
@@ -115,8 +115,8 @@ public class GroupingTableMap extends MapReduceBase
/**
* Create a key by concatenating multiple column values. Override this function in order to
- * produce different types of keys. n * @return key generated by concatenating multiple column
- * values
+ * produce different types of keys.
+ * @return key generated by concatenating multiple column values
*/
protected ImmutableBytesWritable createGroupKey(byte[][] vals) {
if (vals == null) {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
index 16256942d72..8af0b4b4749 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
@@ -53,7 +53,7 @@ public class IdentityTableMap extends MapReduceBase
}
/**
- * Pass the key, value to reduce nnnnn
+ * Pass the key, value to reduce
*/
public void map(ImmutableBytesWritable key, Result value,
OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
index 79d5f3dc8c0..29f9478da10 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
@@ -38,7 +38,7 @@ public class IdentityTableReduce extends MapReduceBase
private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReduce.class.getName());
/**
- * No aggregation, output pairs of (key, record) nnnnn
+ * No aggregation, output pairs of (key, record)
*/
public void reduce(ImmutableBytesWritable key, Iterator<Put> values,
OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
index 24e9da0f28d..0e9f0deaf67 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
@@ -105,7 +105,6 @@ public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat
* restoreDir. Sets:
* {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#RESTORE_DIRS_KEY},
* {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#SNAPSHOT_TO_SCANS_KEY}
- * nnnn
*/
public static void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans,
Path restoreDir) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
index 4f95950589c..2f6324a7ac5 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
@@ -65,9 +65,7 @@ public class RowCounter extends Configured implements Tool {
}
}
- /**
- * n * @return the JobConf n
- */
+ /** Returns the JobConf */
public JobConf createSubmittableJob(String[] args) throws IOException {
JobConf c = new JobConf(getConf(), getClass());
c.setJobName(NAME);
@@ -104,9 +102,6 @@ public class RowCounter extends Configured implements Tool {
return 0;
}
- /**
- * nn
- */
public static void main(String[] args) throws Exception {
int errCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(), args);
System.exit(errCode);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index 34736bd6a3d..667629016d3 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -210,7 +210,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
/**
* Allows subclasses to initialize the table information.
* @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
- * @param tableName The {@link TableName} of the table to process. n
+ * @param tableName The {@link TableName} of the table to process.
*/
protected void initializeTable(Connection connection, TableName tableName) throws IOException {
if (this.table != null || this.connection != null) {
@@ -240,7 +240,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
}
/**
- * Allows subclasses to set the {@link TableRecordReader}. n * to provide other
+ * Allows subclasses to set the {@link TableRecordReader}, e.g. to provide other
* {@link TableRecordReader} implementations.
*/
protected void setTableRecordReader(TableRecordReader tableRecordReader) {
@@ -248,7 +248,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
}
/**
- * Allows subclasses to set the {@link Filter} to be used. n
+ * Allows subclasses to set the {@link Filter} to be used.
*/
protected void setRowFilter(Filter rowFilter) {
this.rowFilter = rowFilter;
@@ -272,7 +272,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
/**
* Close the Table and related objects that were initialized via
- * {@link #initializeTable(Connection, TableName)}. n
+ * {@link #initializeTable(Connection, TableName)}.
*/
protected void closeTable() throws IOException {
close(table, connection);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
index 1c60bec84cd..c24bd039543 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
@@ -92,7 +92,8 @@ public class TableOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
* Failure to do so will drop writes.
* @param ignored Ignored filesystem
* @param job Current JobConf
- * @param name Name of the job n * @return The newly created writer instance.
+ * @param name Name of the job
+ * @return The newly created writer instance.
* @throws IOException When creating the writer fails.
*/
@Override
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
index c19531be46f..35d69036317 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
@@ -34,14 +34,14 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl();
/**
- * Restart from survivable exceptions by creating a new scanner. nn
+ * Restart from survivable exceptions by creating a new scanner.
*/
public void restart(byte[] firstRow) throws IOException {
this.recordReaderImpl.restart(firstRow);
}
/**
- * Build the scanner. Not done in constructor to allow for extension. n
+ * Build the scanner. Not done in constructor to allow for extension.
*/
public void init() throws IOException {
this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow());
@@ -82,26 +82,28 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
this.recordReaderImpl.setRowFilter(rowFilter);
}
+ @Override
public void close() {
this.recordReaderImpl.close();
}
/**
- * n *
* @see org.apache.hadoop.mapred.RecordReader#createKey()
*/
+ @Override
public ImmutableBytesWritable createKey() {
return this.recordReaderImpl.createKey();
}
/**
- * n *
* @see org.apache.hadoop.mapred.RecordReader#createValue()
*/
+ @Override
public Result createValue() {
return this.recordReaderImpl.createValue();
}
+ @Override
public long getPos() {
// This should be the ordinal tuple in the range;
@@ -109,6 +111,7 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
return this.recordReaderImpl.getPos();
}
+ @Override
public float getProgress() {
// Depends on the total number of tuples and getPos
return this.recordReaderImpl.getPos();
@@ -117,8 +120,9 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
/**
* @param key HStoreKey as input key.
* @param value MapWritable as input value
- * @return true if there was more data n
+ * @return true if there was more data
*/
+ @Override
public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
return this.recordReaderImpl.next(key, value);
}
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
index 80d6668eda1..e327c0a04a6 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
@@ -154,7 +154,6 @@ public class TableRecordReaderImpl {
}
/**
- * n *
* @see org.apache.hadoop.mapred.RecordReader#createKey()
*/
public ImmutableBytesWritable createKey() {
@@ -162,7 +161,6 @@ public class TableRecordReaderImpl {
}
/**
- * n *
* @see org.apache.hadoop.mapred.RecordReader#createValue()
*/
public Result createValue() {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
index 2cb63ba7a6a..0bcb559ae3c 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
@@ -43,7 +43,7 @@ public class TableSplit implements InputSplit, Comparable<TableSplit> {
}
/**
- * Constructor nnnn
+ * Constructor
*/
public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) {
this.m_tableName = tableName;
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
index a48ba49058a..8d12fe5d720 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
@@ -59,7 +59,7 @@ public class CellCreator {
* @param value column value
* @param voffset value offset
* @param vlength value length
- * @return created Cell n
+ * @return created Cell
*/
public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
@@ -83,7 +83,8 @@ public class CellCreator {
* @param voffset value offset
* @param vlength value length
* @param visExpression visibility expression to be associated with cell
- * @return created Cell n * @deprecated since 0.98.9
+ * @return created Cell
+ * @deprecated since 0.98.9
* @see <a href="https://issues.apache.org/jira/browse/HBASE-10560">HBASE-10560</a>
*/
@Deprecated
@@ -111,7 +112,8 @@ public class CellCreator {
* @param timestamp version timestamp
* @param value column value
* @param voffset value offset
- * @param vlength value length n * @return created Cell n
+ * @param vlength value length
+ * @return created Cell
*/
public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index b41d94fcebb..ccaf55e5025 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -330,7 +330,7 @@ public class HashTable extends Configured implements Tool {
}
/**
- * Open a TableHash.Reader starting at the first hash at or after the given key. n
+ * Open a TableHash.Reader starting at the first hash at or after the given key.
*/
public Reader newReader(Configuration conf, ImmutableBytesWritable startKey)
throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 2bf6e6b5a04..0a811c92ba9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -148,7 +148,7 @@ public class ImportTsv extends Configured implements Tool {
/**
* @param columnsSpecification the list of columns to parser out, comma separated. The row key
- * should be the special token TsvParser.ROWKEY_COLUMN_SPEC n
+ * should be the special token TsvParser.ROWKEY_COLUMN_SPEC
*/
public TsvParser(String columnsSpecification, String separatorStr) {
// Configure separator
@@ -416,8 +416,8 @@ public class ImportTsv extends Configured implements Tool {
}
/**
- * Return starting position and length of row key from the specified line bytes. nn * @return
- * Pair of row key offset and length. n
+ * Return starting position and length of row key from the specified line bytes.
+ * @return Pair of row key offset and length.
*/
public Pair<Integer, Integer> parseRowKey(byte[] lineBytes, int length)
throws BadTsvLineException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
index fb42e332833..ef3179830f9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
@@ -72,7 +72,7 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 {
* function will configure the requisite number of reducers to write HFiles for multple tables
* simultaneously
* @param job See {@link org.apache.hadoop.mapreduce.Job}
- * @param multiTableDescriptors Table descriptor and region locator pairs n
+ * @param multiTableDescriptors Table descriptor and region locator pairs
*/
public static void configureIncrementalLoad(Job job, List<TableInfo> multiTableDescriptors)
throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
index 5a5d1149755..35c12672dea 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
@@ -76,8 +76,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
boolean useWriteAheadLogging;
/**
- * n * HBaseConfiguration to used n * whether to use write ahead logging. This can be turned off
- * ( <tt>false</tt>) to improve performance when bulk loading data.
+ * @param conf the HBaseConfiguration to use
+ * @param useWriteAheadLogging whether to use write ahead logging. This can be turned off
+ * (<tt>false</tt>) to improve performance when bulk loading data.
*/
public MultiTableRecordWriter(Configuration conf, boolean useWriteAheadLogging)
throws IOException {
@@ -88,8 +88,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
}
/**
- * n * the name of the table, as a string
- * @return the named mutator n * if there is a problem opening a table
+ * @param tableName the name of the table, as a string
+ * @return the named mutator
+ * @throws IOException if there is a problem opening a table
*/
BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException {
if (this.connection == null) {
@@ -115,8 +115,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
}
/**
- * Writes an action (Put or Delete) to the specified table. n * the table being updated. n * the
- * update, either a put or a delete. n * if the action is not a put or a delete.
+ * Writes an action (Put or Delete) to the specified table.
+ * @param tableName the table being updated
+ * @param action the update, either a put or a delete
+ * @throws IOException if the action is not a put or a delete
*/
@Override
public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
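A hedged sketch of feeding this writer from a task (context is the Context of a mapper or reducer whose job is configured with MultiTableOutputFormat; table, family, and qualifier names are placeholders):
  Put put = new Put(Bytes.toBytes("row1"));
  put.addColumn(Bytes.toBytes("d"), Bytes.toBytes("c1"), Bytes.toBytes("v1"));
  // The output key names the destination table; the value is the mutation itself.
  context.write(new ImmutableBytesWritable(Bytes.toBytes("table1")), put);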
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
index 3fc99223530..42db1db5f87 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
@@ -78,7 +78,7 @@ public class MultiTableSnapshotInputFormatImpl {
* Return the list of splits extracted from the scans/snapshots pushed to conf by
* {@link #setInput(Configuration, Map, Path)}
* @param conf Configuration to determine splits from
- * @return Return the list of splits extracted from the scans/snapshots pushed to conf n
+ * @return Return the list of splits extracted from the scans/snapshots pushed to conf
*/
public List<TableSnapshotInputFormatImpl.InputSplit> getSplits(Configuration conf)
throws IOException {
@@ -112,7 +112,7 @@ public class MultiTableSnapshotInputFormatImpl {
* Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
* {@link #setSnapshotToScans(Configuration, Map)}
* @param conf Configuration to extract name -> list<scan> mappings from.
- * @return the snapshot name -> list<scan> mapping pushed to configuration n
+ * @return the snapshot name -> list<scan> mapping pushed to configuration
*/
public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException {
@@ -136,7 +136,7 @@ public class MultiTableSnapshotInputFormatImpl {
}
/**
- * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) nnn
+ * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY})
*/
public void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans)
throws IOException {
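A hedged sketch of building the mapping this method serializes (snapshot names and scans are placeholders; conf is the job Configuration):
  Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
  snapshotScans.put("snapshot1",
    Collections.singletonList(new Scan().addFamily(Bytes.toBytes("d"))));
  snapshotScans.put("snapshot2", Arrays.asList(new Scan(), new Scan()));
  new MultiTableSnapshotInputFormatImpl().setSnapshotToScans(conf, snapshotScans);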
@@ -161,7 +161,7 @@ public class MultiTableSnapshotInputFormatImpl {
* Retrieve the directories into which snapshots have been restored from
* ({@link #RESTORE_DIRS_KEY})
* @param conf Configuration to extract restore directories from
- * @return the directories into which snapshots have been restored from n
+ * @return the directories into which snapshots have been restored from
*/
public Map<String, Path> getSnapshotDirs(Configuration conf) throws IOException {
List<Map.Entry<String, String>> kvps = ConfigurationUtil.getKeyValues(conf, RESTORE_DIRS_KEY);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
index 9228daf4fb4..9a8c4fbb545 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
@@ -248,7 +248,7 @@ public class RowCounter extends AbstractHBaseTool {
* Sets filter {@link FilterBase} to the {@link Scan} instance. If provided rowRangeList contains
* more than one element, method sets filter which is instance of {@link MultiRowRangeFilter}.
* Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. If rowRangeList
- * contains exactly one element, startRow and stopRow are set to the scan. nn
+ * contains exactly one element, startRow and stopRow are set to the scan.
*/
private static void setScanFilter(Scan scan, List<MultiRowRangeFilter.RowRange> rowRangeList) {
final int size = rowRangeList == null ? 0 : rowRangeList.size();
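A hedged restatement of that branching as standalone code, not the private method itself (scan is an existing Scan; the single example range is arbitrary):
  List<MultiRowRangeFilter.RowRange> ranges =
    Arrays.asList(new MultiRowRangeFilter.RowRange("a", true, "m", false));
  if (ranges.size() > 1) {
    scan.setFilter(new MultiRowRangeFilter(ranges));
  } else {
    scan.setFilter(new FirstKeyOnlyFilter());
    if (ranges.size() == 1) {
      MultiRowRangeFilter.RowRange range = ranges.get(0);
      scan.withStartRow(range.getStartRow()).withStopRow(range.getStopRow());
    }
  }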
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index efd872263b1..7d172375c10 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -362,8 +362,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
* @param split A TableSplit corresponding to a range of rowkeys
* @param n Number of ranges after splitting. Pass 1 means no split for the range; pass 2 if
* you want to split the range in two.
- * @return A list of TableSplit, the size of the list is n
- * @throws IllegalArgumentIOException throws IllegalArgumentIOException
+ * @return A list of TableSplit, the size of the list is {@code n}
*/
protected List<InputSplit> createNInputSplitsUniform(InputSplit split, int n)
throws IllegalArgumentIOException {
@@ -581,7 +580,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
/**
* Allows subclasses to initialize the table information.
* @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
- * @param tableName The {@link TableName} of the table to process. n
+ * @param tableName The {@link TableName} of the table to process.
*/
protected void initializeTable(Connection connection, TableName tableName) throws IOException {
if (this.table != null || this.connection != null) {
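A hedged sketch of a subclass honoring that contract (TableInputFormat does essentially this; the table name is a placeholder):
  @Override
  protected void initialize(JobContext context) throws IOException {
    // Unmanaged connection; closeTable() is responsible for closing it.
    Connection connection = ConnectionFactory.createConnection(context.getConfiguration());
    initializeTable(connection, TableName.valueOf("exampleTable"));
    setScan(new Scan());
  }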
@@ -642,7 +641,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
/**
* Close the Table and related objects that were initialized via
- * {@link #initializeTable(Connection, TableName)}. n
+ * {@link #initializeTable(Connection, TableName)}.
*/
protected void closeTable() throws IOException {
close(admin, table, regionLocator, connection);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 1f14049c2de..dc3915501b2 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -848,7 +848,7 @@ public class TableMapReduceUtil {
* @param my_class the class to find.
* @param fs the FileSystem with which to qualify the returned path.
* @param packagedClasses a map of class name to path.
- * @return a jar file that contains the class. n
+ * @return a jar file that contains the class.
*/
private static Path findOrCreateJar(Class<?> my_class, FileSystem fs,
Map<String, String> packagedClasses) throws IOException {
@@ -897,7 +897,7 @@ public class TableMapReduceUtil {
* that is not the first thing on the class path that has a class with the same name. Looks first
* on the classpath and then in the <code>packagedClasses</code> map.
* @param my_class the class to find.
- * @return a jar file that contains the class, or null. n
+ * @return a jar file that contains the class, or null.
*/
private static String findContainingJar(Class<?> my_class, Map<String, String> packagedClasses)
throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index e8316c5016f..17c6c0e4551 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -90,7 +90,8 @@ public class TableOutputFormat<KEY> extends OutputFormat<KEY, Mutation> implemen
private BufferedMutator mutator;
/**
- * n *
+ * Creates a writer for the table named by the <code>OUTPUT_TABLE</code> key in the
+ * configuration.
*/
public TableRecordWriter() throws IOException {
String tableName = conf.get(OUTPUT_TABLE);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
index a0df98796b4..6b22ad1bb0f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
@@ -70,7 +70,8 @@ public class TableRecordReader extends RecordReader<ImmutableBytesWritable, Resu
/**
* Returns the current key.
- * @return The current key. n * @throws InterruptedException When the job is aborted.
+ * @return The current key.
+ * @throws InterruptedException When the job is aborted.
* @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey()
*/
@Override
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
index 79dfe752be0..2fba0197858 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
@@ -92,7 +92,7 @@ public class TextSortReducer
* Handles initializing this class with objects specific to it (i.e., the parser). Common
* initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
* subclass may choose to override this method and call <code>doSetup</code> as well before
- * handling it's own custom params. n
+ * handling its own custom params.
*/
@Override
protected void setup(Context context) {
@@ -107,7 +107,7 @@ public class TextSortReducer
}
/**
- * Handles common parameter initialization that a subclass might want to leverage. nn
+ * Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context, Configuration conf) {
// If a custom separator has been used,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index b6c4e814113..4cdb918bdb0 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -96,7 +96,7 @@ public class TsvImporterMapper extends Mapper<LongWritable, Text, ImmutableBytes
* Handles initializing this class with objects specific to it (i.e., the parser). Common
* initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
* subclass may choose to override this method and call <code>doSetup</code> as well before
- * handling it's own custom params. n
+ * handling its own custom params.
*/
@Override
protected void setup(Context context) {
@@ -112,7 +112,7 @@ public class TsvImporterMapper extends Mapper<LongWritable, Text, ImmutableBytes
}
/**
- * Handles common parameter initialization that a subclass might want to leverage. n
+ * Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context) {
Configuration conf = context.getConfiguration();
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
index c9f7012edde..1ede86cf830 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
@@ -64,7 +64,7 @@ public class TsvImporterTextMapper
* Handles initializing this class with objects specific to it (i.e., the parser). Common
* initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
* subclass may choose to override this method and call <code>doSetup</code> as well before
- * handling it's own custom params. n
+ * handling its own custom params.
*/
@Override
protected void setup(Context context) {
@@ -79,7 +79,7 @@ public class TsvImporterTextMapper
}
/**
- * Handles common parameter initialization that a subclass might want to leverage. n
+ * Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context) {
Configuration conf = context.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 93d439f1608..8fd2d5f7fb2 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -265,7 +265,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
interface Status {
/**
* Sets status
- * @param msg status message n
+ * @param msg status message
*/
void setStatus(final String msg) throws IOException;
}
@@ -549,7 +549,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
/*
* Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write
* out an input file with an instruction per client regarding which row they are to start on.
- * @param cmd Command to run. n
+ * @param cmd Command to run.
*/
static Job doMapReduce(TestOptions opts, final Configuration conf)
throws IOException, InterruptedException, ClassNotFoundException {
@@ -600,7 +600,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
/*
* Write input file of offsets-per-client for the mapreduce job.
* @param c Configuration
- * @return Directory that contains file written whose name is JOB_INPUT_FILENAME n
+ * @return Directory that contains the written file whose name is JOB_INPUT_FILENAME
*/
static Path writeInputFile(final Configuration c, final TestOptions opts) throws IOException {
return writeInputFile(c, opts, new Path("."));
@@ -1354,7 +1354,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
/*
* Run test
- * @return Elapsed time. n
+ * @return Elapsed time.
*/
long test() throws IOException, InterruptedException {
testSetup();
@@ -2448,8 +2448,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
}
/*
- * Format passed integer. n * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version
- * of passed number (Does absolute in case number is negative).
+ * Format passed integer.
+ * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed number (takes
+ * the absolute value in case the number is negative).
*/
public static byte[] format(final int number) {
byte[] b = new byte[ROW_LENGTH];
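A hedged reconstruction of the described formatting as a standalone method (the width is hard-coded here purely for illustration; the class defines its own ROW_LENGTH constant):
  static byte[] format(final int number) {
    final int width = 26; // illustrative stand-in for ROW_LENGTH
    byte[] b = new byte[width];
    int d = Math.abs(number); // absolute value in case the number is negative
    for (int i = b.length - 1; i >= 0; i--) {
      b[i] = (byte) ((d % 10) + '0');
      d /= 10;
    }
    return b;
  }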
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
index 7d6dc6e46b7..0ec4c2ffa28 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
@@ -110,15 +110,15 @@ public class TestTableInputFormat {
/**
* Setup a table with two rows and values.
* @param tableName the name of the table to create
- * @return A Table instance for the created table. n
+ * @return A Table instance for the created table.
*/
public static Table createTable(byte[] tableName) throws IOException {
return createTable(tableName, new byte[][] { FAMILY });
}
/**
- * Setup a table with two rows and values per column family. n * @return A Table instance for the
- * created table. n
+ * Setup a table with two rows and values per column family.
+ * @return A Table instance for the created table.
*/
public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
Table table = UTIL.createTable(TableName.valueOf(tableName), families);
@@ -153,7 +153,7 @@ public class TestTableInputFormat {
}
/**
- * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. nn
+ * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API.
*/
static void runTestMapred(Table table) throws IOException {
org.apache.hadoop.hbase.mapred.TableRecordReader trr =
@@ -181,7 +181,7 @@ public class TestTableInputFormat {
}
/**
- * Create a table that IOE's on first scanner next call n
+ * Create a table that IOE's on first scanner next call
*/
static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
@@ -212,7 +212,7 @@ public class TestTableInputFormat {
}
/**
- * Create a table that throws a DoNoRetryIOException on first scanner next call n
+ * Create a table that throws a DoNotRetryIOException on first scanner next call
*/
static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
@@ -245,7 +245,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming no errors using mapred api. n
+ * Run test assuming no errors using mapred api.
*/
@Test
public void testTableRecordReader() throws IOException {
@@ -254,7 +254,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming Scanner IOException failure using mapred api, n
+ * Run test assuming Scanner IOException failure using mapred api.
*/
@Test
public void testTableRecordReaderScannerFail() throws IOException {
@@ -263,7 +263,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming Scanner IOException failure using mapred api, n
+ * Run test assuming Scanner IOException failure using mapred api.
*/
@Test(expected = IOException.class)
public void testTableRecordReaderScannerFailTwice() throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
index 12a5650c981..0e7ff24a1da 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
@@ -201,7 +201,7 @@ public abstract class MultiTableInputFormatTestBase {
}
/**
- * Tests a MR scan using specific start and stop rows. nnn
+ * Tests a MR scan using specific start and stop rows.
*/
private void testScan(String start, String stop, String last)
throws IOException, InterruptedException, ClassNotFoundException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 54d171659d0..51e9e1e7755 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -822,7 +822,7 @@ public class TestHFileOutputFormat2 {
/**
* Test for {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the
- * family compression map is correctly serialized into and deserialized from configuration n
+ * family compression map is correctly serialized into and deserialized from configuration
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
@@ -888,7 +888,7 @@ public class TestHFileOutputFormat2 {
/**
* Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the
- * family bloom type map is correctly serialized into and deserialized from configuration n
+ * family bloom type map is correctly serialized into and deserialized from configuration
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
@@ -949,7 +949,7 @@ public class TestHFileOutputFormat2 {
/**
* Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the
- * family block size map is correctly serialized into and deserialized from configuration n
+ * family block size map is correctly serialized into and deserialized from configuration
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
@@ -1014,7 +1014,7 @@ public class TestHFileOutputFormat2 {
/**
* Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that
* the family data block encoding map is correctly serialized into and deserialized from
- * configuration n
+ * configuration
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 57ecb5aefa1..86bf3220658 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -173,8 +173,8 @@ public class TestImportExport {
}
/**
- * Runs an export job with the specified command line args n * @return true if job completed
- * successfully nnn
+ * Runs an export job with the specified command line args
+ * @return true if job completed successfully
*/
protected boolean runExport(String[] args) throws Throwable {
// need to make a copy of the configuration because to make sure different temp dirs are used.
@@ -187,8 +187,8 @@ public class TestImportExport {
}
/**
- * Runs an import job with the specified command line args n * @return true if job completed
- * successfully nnn
+ * Runs an import job with the specified command line args
+ * @return true if job completed successfully
*/
boolean runImport(String[] args) throws Throwable {
// need to make a copy of the configuration because to make sure different temp dirs are used.
@@ -197,7 +197,7 @@ public class TestImportExport {
}
/**
- * Test simple replication case with column mapping n
+ * Test simple replication case with column mapping
*/
@Test
public void testSimpleCase() throws Throwable {
@@ -249,7 +249,7 @@ public class TestImportExport {
}
/**
- * Test export hbase:meta table n
+ * Test export hbase:meta table
*/
@Test
public void testMetaExport() throws Throwable {
@@ -259,7 +259,7 @@ public class TestImportExport {
}
/**
- * Test import data from 0.94 exported file n
+ * Test import data from 0.94 exported file
*/
@Test
public void testImport94Table() throws Throwable {
@@ -508,7 +508,7 @@ public class TestImportExport {
/**
* Count the number of keyvalues in the specified table with the given filter
* @param table the table to scan
- * @return the number of keyvalues found n
+ * @return the number of keyvalues found
*/
private int getCount(Table table, Filter filter) throws IOException {
Scan scan = new Scan();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
index b73cc7e1abb..a7b977620a1 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
@@ -154,9 +154,9 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
/**
* Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv
* <code>Tool</code> instance so that other tests can inspect it for further validation as
- * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n *
- * Any arguments to pass BEFORE inputFile path is appended. n * @return The Tool instance used to
- * run the test.
+ * necessary. This method is static to ensure non-reliance on instance's util/conf facilities.
+ * @param args any arguments to pass BEFORE the inputFile path is appended
+ * @return The Tool instance used to run the test.
*/
private Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, String[] args,
int valueMultiplier, boolean dataAvailable) throws Exception {
@@ -193,7 +193,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
}
/**
- * Confirm ImportTsv via data in online table. n
+ * Confirm ImportTsv ran correctly by checking the data in the online table.
*/
private static void validateTable(Configuration conf, TableName tableName, String family,
int valueMultiplier, boolean dataAvailable) throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
index cae349ce05d..e15181e9c94 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
@@ -316,8 +316,8 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
/**
* Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv
* <code>Tool</code> instance so that other tests can inspect it for further validation as
- * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n *
- * Any arguments to pass BEFORE inputFile path is appended.
+ * necessary. This method is static to ensure non-reliance on instance's util/conf facilities. Any
+ * arguments to pass BEFORE the inputFile path is appended.
* @param expectedKVCount Expected KV count. pass -1 to skip the kvcount check
* @return The Tool instance used to run the test.
*/
@@ -461,7 +461,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
* Method returns the total KVs in given hfile
* @param fs File System
* @param p HFile path
- * @return KV count in the given hfile n
+ * @return KV count in the given hfile
*/
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
Configuration conf = util.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
index 83634742b28..9316b09b8c9 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
@@ -540,7 +540,7 @@ public class TestImportTsv implements Configurable {
* Method returns the total KVs in given hfile
* @param fs File System
* @param p HFile path
- * @return KV count in the given hfile n
+ * @return KV count in the given hfile
*/
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
Configuration conf = util.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
index bfccff65c66..7d099aa44e2 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
@@ -80,7 +80,7 @@ public class TestMultiTableInputFormatBase {
/**
* Test getSplits only puts up one Connection. In the past it has put up many Connections. Each
* Connection setup comes with a fresh new cache so we have to do a fresh hit on hbase:meta. Should
- * only do one Connection when doing getSplits even if a MultiTableInputFormat. n
+ * only do one Connection when doing getSplits even if a MultiTableInputFormat.
*/
@Test
public void testMRSplitsConnectionCount() throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
index 3db7fa7ef0b..f5f0fdf169a 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
@@ -93,7 +93,7 @@ public class TestMultithreadedTableMapper {
public static class ProcessContentsMapper extends TableMapper<ImmutableBytesWritable, Put> {
/**
- * Pass the key, and reversed value to reduce nnnn
+ * Pass the key and reversed value to reduce
*/
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
@@ -118,7 +118,7 @@ public class TestMultithreadedTableMapper {
}
/**
- * Test multithreadedTableMappper map/reduce against a multi-region table nnn
+ * Test MultithreadedTableMapper map/reduce against a multi-region table
*/
@Test
public void testMultithreadedTableMapper()
@@ -184,7 +184,8 @@ public class TestMultithreadedTableMapper {
/**
* Looks at every value of the mapreduce output and verifies that indeed the values have been
* reversed.
- * @param table Table to scan. n * @throws NullPointerException if we failed to find a cell value
+ * @param table Table to scan.
+ * @throws NullPointerException if we failed to find a cell value
*/
private void verifyAttempt(final Table table) throws IOException, NullPointerException {
Scan scan = new Scan();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
index 49daac88e37..8f15fb1c170 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
@@ -87,7 +87,7 @@ public class TestRowCounter {
}
/**
- * Test a case when no column was specified in command line arguments. n
+ * Test a case when no column was specified in command line arguments.
*/
@Test
public void testRowCounterNoColumn() throws Exception {
@@ -96,7 +96,7 @@ public class TestRowCounter {
}
/**
- * Test a case when the column specified in command line arguments is exclusive for few rows. n
+ * Test a case when the column specified in command line arguments is exclusive for a few rows.
*/
@Test
public void testRowCounterExclusiveColumn() throws Exception {
@@ -106,7 +106,7 @@ public class TestRowCounter {
/**
* Test a case when the column specified in command line arguments is one for which the qualifier
- * contains colons. n
+ * contains colons.
*/
@Test
public void testRowCounterColumnWithColonInQualifier() throws Exception {
@@ -116,7 +116,7 @@ public class TestRowCounter {
/**
* Test a case when the column specified in command line arguments is not part of first KV for a
- * row. n
+ * row.
*/
@Test
public void testRowCounterHiddenColumn() throws Exception {
@@ -126,7 +126,7 @@ public class TestRowCounter {
/**
* Test a case when the column specified in command line arguments is exclusive for a few rows and
- * also a row range filter is specified n
+ * also a row range filter is specified
*/
@Test
public void testRowCounterColumnAndRowRange() throws Exception {
@@ -135,7 +135,7 @@ public class TestRowCounter {
}
/**
- * Test a case when a range is specified with single range of start-end keys n
+ * Test a case when a range is specified with single range of start-end keys
*/
@Test
public void testRowCounterRowSingleRange() throws Exception {
@@ -144,7 +144,7 @@ public class TestRowCounter {
}
/**
- * Test a case when a range is specified with single range with end key only n
+ * Test a case when a range is specified with single range with end key only
*/
@Test
public void testRowCounterRowSingleRangeUpperBound() throws Exception {
@@ -153,7 +153,7 @@ public class TestRowCounter {
}
/**
- * Test a case when a range is specified with two ranges where one range is with end key only n
+ * Test a case when a range is specified with two ranges where one range is with end key only
*/
@Test
public void testRowCounterRowMultiRangeUpperBound() throws Exception {
@@ -162,7 +162,7 @@ public class TestRowCounter {
}
/**
- * Test a case when a range is specified with multiple ranges of start-end keys n
+ * Test a case when a range is specified with multiple ranges of start-end keys
*/
@Test
public void testRowCounterRowMultiRange() throws Exception {
@@ -172,7 +172,7 @@ public class TestRowCounter {
/**
* Test a case when a range is specified with multiple ranges of start-end keys; one range is
- * filled, another two are not n
+ * filled, the other two are not
*/
@Test
public void testRowCounterRowMultiEmptyRange() throws Exception {
@@ -193,7 +193,7 @@ public class TestRowCounter {
}
/**
- * Test a case when the timerange is specified with --starttime and --endtime options n
+ * Test a case when the timerange is specified with --starttime and --endtime options
*/
@Test
public void testRowCounterTimeRange() throws Exception {
@@ -241,7 +241,7 @@ public class TestRowCounter {
/**
* Run the RowCounter map reduce job and verify the row count.
* @param args the command line arguments to be used for rowcounter job.
- * @param expectedCount the expected row count (result of map reduce job). n
+ * @param expectedCount the expected row count (result of map reduce job).
*/
private void runRowCount(String[] args, int expectedCount) throws Exception {
RowCounter rowCounter = new RowCounter();
@@ -433,7 +433,7 @@ public class TestRowCounter {
/**
* Writes TOTAL_ROWS number of distinct rows into the table. A few rows have two columns, a few have
- * one. nn
+ * one.
*/
private static void writeRows(Table table, int totalRows, int rowsWithOneCol) throws IOException {
final byte[] family = Bytes.toBytes(COL_FAM);
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
index bf1a7439b4e..ca0b9df79d3 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
@@ -105,15 +105,16 @@ public class TestTableInputFormat {
}
/**
- * Setup a table with two rows and values. n * @return A Table instance for the created table. n
+ * Setup a table with two rows and values.
+ * @return A Table instance for the created table.
*/
public static Table createTable(byte[] tableName) throws IOException {
return createTable(tableName, new byte[][] { FAMILY });
}
/**
- * Setup a table with two rows and values per column family. n * @return A Table instance for the
- * created table. n
+ * Setup a table with two rows and values per column family.
+ * @return A Table instance for the created table.
*/
public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
Table table = UTIL.createTable(TableName.valueOf(tableName), families);
@@ -148,7 +149,7 @@ public class TestTableInputFormat {
}
/**
- * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API. nnn
+ * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API.
*/
static void runTestMapreduce(Table table) throws IOException, InterruptedException {
org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr =
@@ -182,7 +183,7 @@ public class TestTableInputFormat {
}
/**
- * Create a table that IOE's on first scanner next call n
+ * Create a table that IOE's on first scanner next call
*/
static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
@@ -213,7 +214,7 @@ public class TestTableInputFormat {
}
/**
- * Create a table that throws a NotServingRegionException on first scanner next call n
+ * Create a table that throws a NotServingRegionException on first scanner next call
*/
static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
@@ -246,7 +247,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming no errors using newer mapreduce api nn
+ * Run test assuming no errors using newer mapreduce api
*/
@Test
public void testTableRecordReaderMapreduce() throws IOException, InterruptedException {
@@ -255,7 +256,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming Scanner IOException failure using newer mapreduce api nn
+ * Run test assuming Scanner IOException failure using newer mapreduce api
*/
@Test
public void testTableRecordReaderScannerFailMapreduce() throws IOException, InterruptedException {
@@ -264,7 +265,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming Scanner IOException failure using newer mapreduce api nn
+ * Run test assuming Scanner IOException failure using newer mapreduce api
*/
@Test(expected = IOException.class)
public void testTableRecordReaderScannerFailMapreduceTwice()
@@ -274,8 +275,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming NotServingRegionException using newer mapreduce api n * @throws
- * org.apache.hadoop.hbase.DoNotRetryIOException
+ * Run test assuming NotServingRegionException using newer mapreduce api
*/
@Test
public void testTableRecordReaderScannerTimeoutMapreduce()
@@ -285,8 +285,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming NotServingRegionException using newer mapreduce api n * @throws
- * org.apache.hadoop.hbase.NotServingRegionException
+ * Run test assuming NotServingRegionException using newer mapreduce api
*/
@Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class)
public void testTableRecordReaderScannerTimeoutMapreduceTwice()
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
index e1bd1626870..99606050667 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
@@ -76,7 +76,7 @@ public class TestTableMapReduce extends TestTableMapReduceBase {
static class ProcessContentsMapper extends TableMapper<ImmutableBytesWritable, Put> {
/**
- * Pass the key, and reversed value to reduce nnnn
+ * Pass the key and reversed value to reduce
*/
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
@@ -136,7 +136,7 @@ public class TestTableMapReduce extends TestTableMapReduceBase {
}
/**
- * Verify scan counters are emitted from the job nn
+ * Verify scan counters are emitted from the job
*/
private void verifyJobCountersAreEmitted(Job job) throws IOException {
Counters counters = job.getCounters();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
index 7490587b109..477ea5d7f6d 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
@@ -83,7 +83,7 @@ public abstract class TestTableMapReduceBase {
}
/**
- * Test a map/reduce against a multi-region table n
+ * Test a map/reduce against a multi-region table
*/
@Test
public void testMultiRegionTable() throws IOException {
@@ -152,7 +152,8 @@ public abstract class TestTableMapReduceBase {
/**
* Looks at every value of the mapreduce output and verifies that indeed the values have been
* reversed.
- * @param table Table to scan. n * @throws NullPointerException if we failed to find a cell value
+ * @param table Table to scan.
+ * @throws NullPointerException if we failed to find a cell value
*/
private void verifyAttempt(final Table table) throws IOException, NullPointerException {
Scan scan = new Scan();
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
index 7fda7422023..0c518de221f 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -148,8 +148,7 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool {
/**
* Pass one or more log file names and formatting options and it will dump out a text version of
- * the contents on <code>stdout</code>. n * Command line arguments n * Thrown upon file system
- * errors etc.
+ * the contents on <code>stdout</code>.
+ * @param args command line arguments
+ * @throws IOException thrown upon file system errors etc.
*/
@Override
public int run(final String[] args) throws IOException {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
index 61dede2ae83..47852f4df2b 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
@@ -41,7 +41,7 @@ public class ExistsResource extends ResourceBase {
TableResource tableResource;
/**
- * Constructor nn
+ * Constructor
*/
public ExistsResource(TableResource tableResource) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 68d774e420c..cc5fb22265c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -44,7 +44,7 @@ public class MultiRowResource extends ResourceBase implements Constants {
String[] columns = null;
/**
- * Constructor nn * @throws java.io.IOException
+ * Constructor
*/
public MultiRowResource(TableResource tableResource, String versions, String columnsStr)
throws IOException {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
index e1282c493ab..e27ee6ddb91 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
@@ -62,14 +62,14 @@ public class NamespacesInstanceResource extends ResourceBase {
boolean queryTables = false;
/**
- * Constructor for standard NamespaceInstanceResource. n
+ * Constructor for a standard NamespacesInstanceResource.
*/
public NamespacesInstanceResource(String namespace) throws IOException {
this(namespace, false);
}
/**
- * Constructor for querying namespace table list via NamespaceInstanceResource. n
+ * Constructor for querying the namespace table list via NamespacesInstanceResource.
*/
public NamespacesInstanceResource(String namespace, boolean queryTables) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
index a3c0e2d2f1a..aeccda24f19 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
@@ -44,7 +44,7 @@ public class NamespacesResource extends ResourceBase {
private static final Logger LOG = LoggerFactory.getLogger(NamespacesResource.class);
/**
- * Constructor n
+ * Constructor
*/
public NamespacesResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
index 39a7ba71dd6..2e01ff24d47 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
@@ -32,7 +32,7 @@ public interface ProtobufMessageHandler {
/**
* Initialize the model from a protobuf representation.
* @param message the raw bytes of the protobuf message
- * @return reference to self for convenience n
+ * @return reference to self for convenience
*/
ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException;
}
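A hedged round-trip sketch (CellModel is one ProtobufMessageHandler implementation and createProtobufOutput() is the serializing counterpart; getObjectFromMessage declares IOException):
  CellModel model = new CellModel(Bytes.toBytes("c1"), Bytes.toBytes("v1"));
  byte[] wire = model.createProtobufOutput();
  CellModel copy = (CellModel) new CellModel().getObjectFromMessage(wire);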
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
index 79760aead9d..7212993fb8d 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
@@ -90,7 +90,7 @@ public class RESTServlet implements Constants {
/**
* Constructor with existing configuration
* @param conf existing configuration
- * @param userProvider the login user provider n
+ * @param userProvider the login user provider
*/
RESTServlet(final Configuration conf, final UserProvider userProvider) throws IOException {
this.realUser = userProvider.getCurrent().getUGI();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
index 21c97302603..17beae40f7b 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
@@ -55,7 +55,7 @@ public class RegionsResource extends ResourceBase {
TableResource tableResource;
/**
- * Constructor nn
+ * Constructor
*/
public RegionsResource(TableResource tableResource) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
index 9baf7aa7c04..babb3d1152c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
@@ -48,7 +48,7 @@ public class RootResource extends ResourceBase {
}
/**
- * Constructor n
+ * Constructor
*/
public RootResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index cfd63aa2d1c..b599b0b1949 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -69,7 +69,7 @@ public class RowResource extends ResourceBase {
private boolean returnResult = false;
/**
- * Constructor nnnnnn
+ * Constructor
*/
public RowResource(TableResource tableResource, String rowspec, String versions, String check,
String returnResult) throws IOException {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
index f5606bb25d7..49801b4f7d8 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
@@ -52,7 +52,7 @@ public class ScannerResource extends ResourceBase {
TableResource tableResource;
/**
- * Constructor nn
+ * Constructor
*/
public ScannerResource(TableResource tableResource) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
index 8348b79985c..958a1288d4f 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -51,7 +51,7 @@ public class StorageClusterStatusResource extends ResourceBase {
}
/**
- * Constructor n
+ * Constructor
*/
public StorageClusterStatusResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
index ea7641e54cd..00c243aec72 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -45,7 +45,7 @@ public class StorageClusterVersionResource extends ResourceBase {
}
/**
- * Constructor n
+ * Constructor
*/
public StorageClusterVersionResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
index 2fe26deb542..dbac4686520 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
@@ -46,7 +46,7 @@ public class TableResource extends ResourceBase {
private static final Logger LOG = LoggerFactory.getLogger(TableResource.class);
/**
- * Constructor nn
+ * Constructor
*/
public TableResource(String table) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
index 8b71f708645..d78ba90cd8c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
@@ -53,7 +53,7 @@ public class VersionResource extends ResourceBase {
}
/**
- * Constructor n
+ * Constructor
*/
public VersionResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
index 85cb2af86a8..3f406fb5d92 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
@@ -255,7 +255,7 @@ public class Client {
* @param method the transaction method
* @param headers HTTP header values to send
* @param path the properly urlencoded path
- * @return the HTTP response code n
+ * @return the HTTP response
*/
public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers,
String path) throws IOException {
@@ -309,7 +309,7 @@ public class Client {
* @param method the transaction method
* @param headers HTTP header values to send
* @param uri a properly urlencoded URI
- * @return the HTTP response code n
+ * @return the HTTP response
*/
public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri)
throws IOException {
@@ -348,7 +348,7 @@ public class Client {
* @param method the HTTP method
* @param headers HTTP header values to send
* @param path the properly urlencoded path or URI
- * @return the HTTP response code n
+ * @return the HTTP response
*/
public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path)
throws IOException {
@@ -407,7 +407,7 @@ public class Client {
/**
* Send a HEAD request
* @param path the path or URI
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response head(String path) throws IOException {
return head(cluster, path, null);
@@ -418,7 +418,7 @@ public class Client {
* @param cluster the cluster definition
* @param path the path or URI
* @param headers the HTTP headers to include in the request
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response head(Cluster cluster, String path, Header[] headers) throws IOException {
HttpHead method = new HttpHead(path);
@@ -433,7 +433,7 @@ public class Client {
/**
* Send a GET request
* @param path the path or URI
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(String path) throws IOException {
return get(cluster, path);
@@ -443,7 +443,7 @@ public class Client {
* Send a GET request
* @param cluster the cluster definition
* @param path the path or URI
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(Cluster cluster, String path) throws IOException {
return get(cluster, path, EMPTY_HEADER_ARRAY);
@@ -453,7 +453,7 @@ public class Client {
* Send a GET request
* @param path the path or URI
* @param accept Accept header value
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(String path, String accept) throws IOException {
return get(cluster, path, accept);
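A hedged end-to-end sketch of this client (host, port, and endpoint are placeholders; Response exposes the status code and body bytes):
  Cluster restCluster = new Cluster();
  restCluster.add("localhost", 8080);
  Client restClient = new Client(restCluster);
  Response response = restClient.get("/version/cluster", "text/plain");
  if (response.getCode() == 200) {
    System.out.println(Bytes.toString(response.getBody()));
  }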
@@ -464,7 +464,7 @@ public class Client {
* @param cluster the cluster definition
* @param path the path or URI
* @param accept Accept header value
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(Cluster cluster, String path, String accept) throws IOException {
Header[] headers = new Header[1];
@@ -476,7 +476,7 @@ public class Client {
* Send a GET request
* @param path the path or URI
* @param headers the HTTP headers to include in the request, <tt>Accept</tt> must be supplied
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(String path, Header[] headers) throws IOException {
return get(cluster, path, headers);
@@ -522,7 +522,7 @@ public class Client {
* @param c the cluster definition
* @param path the path or URI
* @param headers the HTTP headers to include in the request
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(Cluster c, String path, Header[] headers) throws IOException {
if (httpGet != null) {
@@ -539,7 +539,7 @@ public class Client {
* @param path the path or URI
* @param contentType the content MIME type
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response put(String path, String contentType, byte[] content) throws IOException {
return put(cluster, path, contentType, content);
@@ -551,7 +551,7 @@ public class Client {
* @param contentType the content MIME type
* @param content the content bytes
* @param extraHdr extra Header to send
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response put(String path, String contentType, byte[] content, Header extraHdr)
throws IOException {
@@ -600,7 +600,7 @@ public class Client {
* @param path the path or URI
* @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response put(String path, Header[] headers, byte[] content) throws IOException {
return put(cluster, path, headers, content);
@@ -612,7 +612,7 @@ public class Client {
* @param path the path or URI
* @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response put(Cluster cluster, String path, Header[] headers, byte[] content)
throws IOException {
@@ -633,7 +633,7 @@ public class Client {
* @param path the path or URI
* @param contentType the content MIME type
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response post(String path, String contentType, byte[] content) throws IOException {
return post(cluster, path, contentType, content);
@@ -645,7 +645,7 @@ public class Client {
* @param contentType the content MIME type
* @param content the content bytes
* @param extraHdr additional Header to send
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response post(String path, String contentType, byte[] content, Header extraHdr)
throws IOException {
@@ -694,7 +694,7 @@ public class Client {
* @param path the path or URI
* @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response post(String path, Header[] headers, byte[] content) throws IOException {
return post(cluster, path, headers, content);
@@ -706,7 +706,7 @@ public class Client {
* @param path the path or URI
* @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response post(Cluster cluster, String path, Header[] headers, byte[] content)
throws IOException {
@@ -725,7 +725,7 @@ public class Client {
/**
* Send a DELETE request
* @param path the path or URI
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response delete(String path) throws IOException {
return delete(cluster, path);
@@ -735,7 +735,7 @@ public class Client {
* Send a DELETE request
* @param path the path or URI
* @param extraHdr additional Header to send
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response delete(String path, Header extraHdr) throws IOException {
return delete(cluster, path, extraHdr);
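The verb methods above all return a Response; a minimal sketch of how a caller wires them together (gateway host, port, and the /version/rest path are illustrative, and Client.shutdown() is assumed from the same class):

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.util.Bytes;

public class RestClientSketch {
  public static void main(String[] args) throws Exception {
    // Point the client at a REST gateway node.
    Cluster cluster = new Cluster();
    cluster.add("localhost", 8080);
    Client client = new Client(cluster);
    // get(path, accept) is one of the overloads documented above.
    Response response = client.get("/version/rest", "text/plain");
    System.out.println(response.getCode() + " " + Bytes.toString(response.getBody()));
    client.shutdown();
  }
}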
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
index 47e67dbea5a..9071c31614c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
@@ -139,7 +139,7 @@ public class RestCsrfPreventionFilter implements Filter {
String getHeader(String header);
/**
- * Returns the method. n
+ * Returns the method.
*/
String getMethod();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
index 48c7e12202b..eda3267bf58 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
@@ -82,21 +82,21 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
}
/**
- * Constructor nn
+ * Constructor
*/
public CellModel(byte[] column, byte[] value) {
this(column, HConstants.LATEST_TIMESTAMP, value);
}
/**
- * Constructor nnn
+ * Constructor
*/
public CellModel(byte[] column, byte[] qualifier, byte[] value) {
this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
}
/**
- * Constructor from KeyValue n
+ * Constructor from KeyValue
*/
public CellModel(org.apache.hadoop.hbase.Cell cell) {
this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
@@ -104,7 +104,7 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
}
/**
- * Constructor nnn
+ * Constructor
*/
public CellModel(byte[] column, long timestamp, byte[] value) {
this.column = column;
@@ -113,7 +113,7 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
}
/**
- * Constructor nnnn
+ * Constructor
*/
public CellModel(byte[] column, byte[] qualifier, long timestamp, byte[] value) {
this.column = CellUtil.makeColumn(column, qualifier);
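A quick sketch of the constructors whose javadoc is cleaned up here (values illustrative; getColumn() is assumed to return the family:qualifier bytes built by CellUtil.makeColumn):

import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellModelSketch {
  public static void main(String[] args) {
    // Family, qualifier and value; the timestamp defaults to HConstants.LATEST_TIMESTAMP.
    CellModel cell =
      new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
    // Stored internally as the single column "cf:q".
    System.out.println(Bytes.toString(cell.getColumn()));
  }
}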
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
index c45ca38be9f..64b46f2956c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
@@ -64,7 +64,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan
/**
* Constructor to use if namespace does not exist in HBASE.
- * @param namespaceName the namespace name. n
+ * @param namespaceName the namespace name.
*/
public NamespacesInstanceModel(String namespaceName) throws IOException {
this(null, namespaceName);
@@ -73,7 +73,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan
/**
* Constructor
* @param admin the administrative API
- * @param namespaceName the namespace name. n
+ * @param namespaceName the namespace name.
*/
public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException {
this.namespaceName = namespaceName;
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
index c9755532c49..e866c7a935d 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
@@ -58,7 +58,7 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler {
/**
* Constructor
- * @param admin the administrative API n
+ * @param admin the administrative API
*/
public NamespacesModel(Admin admin) throws IOException {
NamespaceDescriptor[] nds = admin.listNamespaceDescriptors();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
index e2b20aaa84e..3655a379804 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -507,7 +507,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
/**
* @param s the JSON representation of the filter
- * @return the filter n
+ * @return the filter
*/
public static Filter buildFilter(String s) throws Exception {
FilterModel model =
@@ -518,7 +518,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
/**
* @param filter the filter
- * @return the JSON representation of the filter n
+ * @return the JSON representation of the filter
*/
public static String stringifyFilter(final Filter filter) throws Exception {
return getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE)
@@ -528,7 +528,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":");
/**
- * @param scan the scan specification n
+ * @param scan the scan specification
*/
public static ScannerModel fromScan(Scan scan) throws Exception {
ScannerModel model = new ScannerModel();
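buildFilter and stringifyFilter above are inverses; a small round-trip sketch (PrefixFilter is an arbitrary choice for illustration):

import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterJsonSketch {
  public static void main(String[] args) throws Exception {
    Filter original = new PrefixFilter(Bytes.toBytes("row-"));
    // Filter -> JSON, the form a REST scanner request carries...
    String json = ScannerModel.stringifyFilter(original);
    // ...and JSON -> Filter on the way back in.
    Filter parsed = ScannerModel.buildFilter(json);
    System.out.println(json + " -> " + parsed.getClass().getSimpleName());
  }
}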
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
index c1023353a70..74d0732ec91 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
@@ -60,7 +60,7 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler {
}
/**
- * Constructor n
+ * Constructor
*/
public TableInfoModel(String name) {
this.name = name;
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
index 51a2bc567cd..32459738002 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
@@ -48,7 +48,7 @@ public class TableModel implements Serializable {
}
/**
- * Constructor n
+ * Constructor
*/
public TableModel(String name) {
super();
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
index 82f3a9481c8..bccc97deca8 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
@@ -52,7 +52,7 @@ public class RemoteAdmin {
private static volatile Unmarshaller versionClusterUnmarshaller;
/**
- * Constructor nn
+ * Constructor
*/
public RemoteAdmin(Client client, Configuration conf) {
this(client, conf, null);
@@ -69,7 +69,7 @@ public class RemoteAdmin {
}
/**
- * Constructor nnn
+ * Constructor
*/
public RemoteAdmin(Client client, Configuration conf, String accessToken) {
this.client = client;
@@ -89,8 +89,8 @@ public class RemoteAdmin {
}
/**
- * @return string representing the rest api's version n * if the endpoint does not exist, there is
- * a timeout, or some other general failure mode
+ * @return string representing the rest api's version
+ * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+ * failure mode
*/
public VersionModel getRestVersion() throws IOException {
@@ -169,8 +169,8 @@ public class RemoteAdmin {
}
/**
- * @return string representing the cluster's version n * if the endpoint does not exist, there is
- * a timeout, or some other general failure mode
+ * @return string representing the cluster's version
+ * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+ * failure mode
*/
public StorageClusterVersionModel getClusterVersion() throws IOException {
@@ -336,8 +336,8 @@ public class RemoteAdmin {
}
/**
- * @return string representing the cluster's version n * if the endpoint does not exist, there is
- * a timeout, or some other general failure mode
+ * @return the table list
+ * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+ * failure mode
*/
public TableListModel getTableList() throws IOException {
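The three accessors above share one failure contract; a sketch of typical use (host and port illustrative; the println calls lean on the models' toString):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteAdmin;

public class RemoteAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Cluster cluster = new Cluster();
    cluster.add("localhost", 8080);
    RemoteAdmin admin = new RemoteAdmin(new Client(cluster), conf);
    // Each call throws IOException on a missing endpoint, a timeout, or other failure.
    System.out.println(admin.getRestVersion());
    System.out.println(admin.getTableList());
  }
}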
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 021c03ce85b..914ed5740a9 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -575,7 +575,7 @@ public class TestRemoteTable {
/**
* Tests a scanner with a limit on the number of rows each scan fetches over its lifetime. The
- * number of rows returned should be equal to the limit n
+ * number of rows returned should be equal to the limit
*/
@Test
public void testLimitedScan() throws Exception {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
index 2ef58351439..11e6b07a040 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
@@ -577,7 +577,7 @@ public abstract class HBaseServerBase<R extends HBaseRpcServicesBase<?>> extends
}
/**
- * get NamedQueue Provider to add different logs to ringbuffer n
+ * get NamedQueue Provider to add different logs to ringbuffer
*/
public NamedQueueRecorder getNamedQueueRecorder() {
return this.namedQueueRecorder;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
index 44498200991..5eca7afd28b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
@@ -34,7 +34,7 @@ class HealthReport {
}
/**
- * Gets the status of the region server. n
+ * Gets the status of the region server.
*/
HealthCheckerExitStatus getStatus() {
return status;
@@ -46,7 +46,7 @@ class HealthReport {
}
/**
- * Gets the health report of the region server. n
+ * Gets the health report of the region server.
*/
String getHealthReport() {
return healthReport;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index 38775bf9384..0839a23f42d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -68,7 +68,7 @@ public class LocalHBaseCluster {
private final Class<? extends HRegionServer> regionServerClass;
/**
- * Constructor. nn
+ * Constructor.
*/
public LocalHBaseCluster(final Configuration conf) throws IOException {
this(conf, DEFAULT_NO);
@@ -77,7 +77,7 @@ public class LocalHBaseCluster {
/**
* Constructor.
* @param conf Configuration to use. Post construction has the master's address.
- * @param noRegionServers Count of regionservers to start. n
+ * @param noRegionServers Count of regionservers to start.
*/
public LocalHBaseCluster(final Configuration conf, final int noRegionServers) throws IOException {
this(conf, 1, 0, noRegionServers, getMasterImplementation(conf),
@@ -88,7 +88,7 @@ public class LocalHBaseCluster {
* Constructor.
* @param conf Configuration to use. Post construction has the active master address.
* @param noMasters Count of masters to start.
- * @param noRegionServers Count of regionservers to start. n
+ * @param noRegionServers Count of regionservers to start.
*/
public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers)
throws IOException {
@@ -118,7 +118,7 @@ public class LocalHBaseCluster {
* Constructor.
* @param conf Configuration to use. Post construction has the master's address.
* @param noMasters Count of masters to start.
- * @param noRegionServers Count of regionservers to start. nnn
+ * @param noRegionServers Count of regionservers to start.
*/
@SuppressWarnings("unchecked")
public LocalHBaseCluster(final Configuration conf, final int noMasters,
@@ -242,9 +242,7 @@ public class LocalHBaseCluster {
});
}
- /**
- * n * @return region server
- */
+ /** Returns region server */
public HRegionServer getRegionServer(int serverNumber) {
return regionThreads.get(serverNumber).getRegionServer();
}
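A sketch of the constructor variants above in use (startup() and shutdown() are assumed from the class's lifecycle API, which this hunk does not show):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;

public class LocalClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // One master and two regionservers, all inside this JVM.
    LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1, 2);
    cluster.startup();
    HRegionServer rs = cluster.getRegionServer(0); // accessor documented above
    System.out.println("first regionserver: " + rs.getServerName());
    cluster.shutdown();
  }
}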
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
index 47f6938652d..62da616acb5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
@@ -30,7 +30,7 @@ public interface RegionStateListener {
// state than introduce a whole new listening mechanism? St.Ack
/**
* Process region split event.
- * @param hri An instance of RegionInfo n
+ * @param hri An instance of RegionInfo
*/
void onRegionSplit(RegionInfo hri) throws IOException;
@@ -42,7 +42,7 @@ public interface RegionStateListener {
void onRegionSplitReverted(RegionInfo hri) throws IOException;
/**
- * Process region merge event. n
+ * Process region merge event.
*/
void onRegionMerged(RegionInfo mergedRegion) throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
index 280ad3b7c47..d9bec2e3d81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
@@ -146,8 +146,8 @@ public class SplitLogTask {
/**
* @param data Serialized date to parse.
- * @return An SplitLogTaskState instance made of the passed <code>data</code> n * @see
- * #toByteArray()
+ * @return An SplitLogTaskState instance made of the passed <code>data</code>
+ * @see #toByteArray()
*/
public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException {
ProtobufUtil.expectPBMagicPrefix(data);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index 68dc87502e0..8615efe6a7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -692,12 +692,12 @@ public class HFileArchiver {
/**
* @return if this is a directory, returns all the children in the directory, otherwise returns
- * an empty list n
+ * an empty list
*/
abstract Collection<File> getChildren() throws IOException;
/**
- * close any outside readers of the file n
+ * close any outside readers of the file
*/
abstract void close() throws IOException;
@@ -708,7 +708,8 @@ public class HFileArchiver {
abstract Path getPath();
/**
- * Move the file to the given destination n * @return <tt>true</tt> on success n
+ * Move the file to the given destination
+ * @return <tt>true</tt> on success
*/
public boolean moveAndClose(Path dest) throws IOException {
this.close();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
index 618a5a66524..2d4bea3a7e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
@@ -93,8 +93,7 @@ public final class VersionInfoUtil {
}
/**
- * n * @return the passed-in <code>version</code> int as a version String (e.g. 0x0103004 is
- * 1.3.4)
+ * Returns the passed-in <code>version</code> int as a version String (e.g. 0x0103004 is 1.3.4)
*/
public static String versionNumberToString(final int version) {
return String.format("%d.%d.%d", ((version >> 20) & 0xff), ((version >> 12) & 0xff),
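The format string above unpacks major/minor/patch from a packed int; the encoding direction, for reference:

import org.apache.hadoop.hbase.client.VersionInfoUtil;

public class VersionPackingSketch {
  public static void main(String[] args) {
    // 1.3.4 packs as (major << 20) | (minor << 12) | patch == 0x0103004;
    // the shifts and masks in versionNumberToString unpack it again.
    int version = (1 << 20) | (3 << 12) | 4;
    System.out.println(VersionInfoUtil.versionNumberToString(version)); // 1.3.4
  }
}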
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
index 29aa273b2b3..a7f813aeea0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
@@ -131,7 +131,7 @@ public interface SplitLogManagerCoordination {
void deleteTask(String taskName);
/**
- * Support method to init constants such as timeout. Mostly required for UTs. n
+ * Support method to init constants such as timeout. Mostly required for UTs.
*/
void init() throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index fcf103c82e2..eeba55d2d54 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -139,7 +139,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener
* It is possible for a task to stay in UNASSIGNED state indefinitely - say SplitLogManager wants
* to resubmit a task. It forces the task to UNASSIGNED state but it dies before it could create
* the RESCAN task node to signal the SplitLogWorkers to pick up the task. To prevent this
- * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup. n
+ * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup.
*/
private void handleUnassignedTask(String path) {
if (ZKSplitLog.isRescanNode(watcher, path)) {
@@ -551,7 +551,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener
* partially done tasks are present. taskname is the name of the task that was put up in
* zookeeper.
* <p>
- * nn * @return DONE if task completed successfully, ERR otherwise
+ * @return DONE if task completed successfully, ERR otherwise
*/
Status finish(ServerName workerName, String taskname);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index 7acb0891dbc..6def70f9714 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -374,7 +374,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLog
* in a cluster.
* <p>
* Synchronization using <code>taskReadySeq</code> ensures that it will try to grab every task
- * that has been put up n
+ * that has been put up
*/
@Override
public void taskLoop() throws InterruptedException {
@@ -534,7 +534,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLog
*/
/**
* endTask() can fail and the only way to recover out of it is for the
- * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. nn
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
*/
@Override
public void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails details) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index cc8977f4581..c1ba9e274ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -655,7 +655,7 @@ public abstract class CoprocessorHost<C extends Coprocessor, E extends Coprocess
* may remain shutdown if any exception occurs during next coprocessor execution which prevent
* master/regionserver stop or cluster shutdown. (Refer:
* <a href="https://issues.apache.org/jira/browse/HBASE-16663">HBASE-16663</a>
- * @return true if bypaas coprocessor execution, false if not. n
+ * @return true if coprocessor execution was bypassed, false if not.
*/
protected <O> boolean execShutdown(final ObserverOperation<O> observerOperation)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index ad381dd4ef3..175ff25e761 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -493,7 +493,7 @@ public interface MasterObserver {
/**
* Called prior to unassigning a given region.
- * @param ctx the environment to interact with the framework and master n
+ * @param ctx the environment to interact with the framework and master
*/
default void preUnassign(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo regionInfo) throws IOException {
@@ -501,7 +501,7 @@ public interface MasterObserver {
/**
* Called after the region unassignment has been requested.
- * @param ctx the environment to interact with the framework and master n
+ * @param ctx the environment to interact with the framework and master
*/
default void postUnassign(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo regionInfo) throws IOException {
@@ -509,7 +509,7 @@ public interface MasterObserver {
/**
* Called prior to marking a given region as offline.
- * @param ctx the environment to interact with the framework and master n
+ * @param ctx the environment to interact with the framework and master
*/
default void preRegionOffline(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo regionInfo) throws IOException {
@@ -517,7 +517,7 @@ public interface MasterObserver {
/**
* Called after the region has been marked offline.
- * @param ctx the environment to interact with the framework and master n
+ * @param ctx the environment to interact with the framework and master
*/
default void postRegionOffline(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo regionInfo) throws IOException {
@@ -597,7 +597,7 @@ public interface MasterObserver {
/**
* This will be called before update META step as part of split transaction.
- * @param ctx the environment to interact with the framework and master nn
+ * @param ctx the environment to interact with the framework and master
*/
default void preSplitRegionBeforeMETAAction(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final byte[] splitKey,
@@ -1465,67 +1465,72 @@ public interface MasterObserver {
}
/**
- * Called before remove a replication peer n * @param peerId a short name that identifies the peer
+ * Called before removing a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
}
/**
- * Called after remove a replication peer n * @param peerId a short name that identifies the peer
+ * Called after removing a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void postRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
}
/**
- * Called before enable a replication peer n * @param peerId a short name that identifies the peer
+ * Called before enabling a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void preEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
}
/**
- * Called after enable a replication peer n * @param peerId a short name that identifies the peer
+ * Called after enabling a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void postEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
}
/**
- * Called before disable a replication peer n * @param peerId a short name that identifies the
- * peer
+ * Called before disabling a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void preDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
}
/**
- * Called after disable a replication peer n * @param peerId a short name that identifies the peer
+ * Called after disabling a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void postDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
}
/**
- * Called before get the configured ReplicationPeerConfig for the specified peer n * @param peerId
- * a short name that identifies the peer
+ * Called before getting the configured ReplicationPeerConfig for the specified peer
+ * @param peerId a short name that identifies the peer
*/
default void preGetReplicationPeerConfig(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
}
/**
- * Called after get the configured ReplicationPeerConfig for the specified peer n * @param peerId
- * a short name that identifies the peer
+ * Called after getting the configured ReplicationPeerConfig for the specified peer
+ * @param peerId a short name that identifies the peer
*/
default void postGetReplicationPeerConfig(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
}
/**
- * Called before update peerConfig for the specified peer n * @param peerId a short name that
- * identifies the peer
+ * Called before updating peerConfig for the specified peer
+ * @param peerId a short name that identifies the peer
*/
default void preUpdateReplicationPeerConfig(
final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
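All of the peer hooks above are default no-ops, so an observer overrides only what it needs; a sketch (the peer-id policy is invented, and the MasterCoprocessor plumbing is the usual way such an observer is exposed):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class PeerGuardObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preRemoveReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
      String peerId) throws IOException {
    // Veto removal of a peer we never want dropped; illustrative policy only.
    if ("critical_dr_peer".equals(peerId)) {
      throw new IOException("removal of replication peer " + peerId + " is blocked");
    }
  }
}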
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 057a9c56814..d37013534b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -570,15 +570,15 @@ public interface RegionObserver {
/**
* This will be called for region operations where read lock is acquired in
- * {@link Region#startRegionOperation()}. n * @param operation The operation is about to be taken
- * on the region
+ * {@link Region#startRegionOperation()}.
+ * @param operation The operation about to be taken on the region
*/
default void postStartRegionOperation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Operation operation) throws IOException {
}
/**
- * Called after releasing read lock in {@link Region#closeRegionOperation()}. nn
+ * Called after releasing read lock in {@link Region#closeRegionOperation()}.
*/
default void postCloseRegionOperation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Operation operation) throws IOException {
@@ -589,8 +589,8 @@ public interface RegionObserver {
* batch operation fails.
* <p>
* Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If
- * need a Cell reference for later use, copy the cell and use that. nn * @param success true if
- * batch operation is successful otherwise false.
+ * you need a Cell reference for later use, copy the cell and use that.
+ * @param success true if batch operation is successful otherwise false.
*/
default void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> ctx,
MiniBatchOperationInProgress<Mutation> miniBatchOp, boolean success) throws IOException {
@@ -1463,8 +1463,8 @@ public interface RegionObserver {
* @param fs fileystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
- * @param size Full size of the file n * @param r original reference file. This will be not null
- * only when reading a split file.
+ * @param size Full size of the file
+ * @param r original reference file. This is non-null only when reading a split file.
* @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain
* @return a Reader instance to use instead of the base reader if overriding default behavior,
* null otherwise
@@ -1485,8 +1485,8 @@ public interface RegionObserver {
* @param fs fileystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
- * @param size Full size of the file n * @param r original reference file. This will be not null
- * only when reading a split file.
+ * @param size Full size of the file
+ * @param r original reference file. This is non-null only when reading a split file.
* @param reader the base reader instance
* @return The reader to use
* @deprecated For Phoenix only, StoreFileReader is not a stable interface.
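Same pattern on the region side; a sketch overriding the batch hook documented above (counter and class name invented; RegionCoprocessor plumbing as usual):

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.LongAdder;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;

public class BatchOutcomeObserver implements RegionCoprocessor, RegionObserver {
  private final LongAdder failedBatches = new LongAdder();

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> ctx,
      MiniBatchOperationInProgress<Mutation> miniBatchOp, boolean success) throws IOException {
    // Runs whether or not the batch succeeded, per the javadoc above;
    // no Cell references are retained past this invocation.
    if (!success) {
      failedBatches.increment();
    }
  }
}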
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
index 19fa8adc1e3..8c02b346f3c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
@@ -49,7 +49,7 @@ public class ForeignException extends IOException {
/**
* Create a new ForeignException that can be serialized. It is assumed that this came from a local
- * source. nn
+ * source.
*/
public ForeignException(String source, Throwable cause) {
super(cause);
@@ -60,7 +60,7 @@ public class ForeignException extends IOException {
/**
* Create a new ForeignException that can be serialized. It is assumed that this is locally
- * generated. nn
+ * generated.
*/
public ForeignException(String source, String msg) {
super(new IllegalArgumentException(msg));
@@ -146,8 +146,8 @@ public class ForeignException extends IOException {
}
/**
- * Takes a series of bytes and tries to generate an ForeignException instance for it. n * @return
- * the ForeignExcpetion instance
+ * Takes a series of bytes and tries to generate a ForeignException instance for it.
+ * @return the ForeignException instance
* @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
*/
public static ForeignException deserialize(byte[] bytes) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
index 3718900cc87..09fb78468dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
@@ -40,7 +40,7 @@ public interface ForeignExceptionSnare {
/**
* Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is no
- * exception this is a no-op n * all exceptions from remote sources are procedure exceptions
+ * exception this is a no-op. All exceptions from remote sources are procedure exceptions.
*/
void rethrowException() throws ForeignException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
index 94418f0c381..ece244fda4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
@@ -106,7 +106,7 @@ public abstract class EventHandler implements Runnable, Comparable<EventHandler>
}
/**
- * This method is the main processing loop to be implemented by the various subclasses. n
+ * This method is the main processing loop to be implemented by the various subclasses.
*/
public abstract void process() throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
index c74388e5b92..6f5acca3f21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
@@ -257,7 +257,7 @@ public class ExecutorService {
}
/**
- * Submit the event to the queue for handling. n
+ * Submit the event to the queue for handling.
*/
void submit(final EventHandler event) {
// If there is a listener for this type, make sure we call the before
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
index ed3986f5883..337fde60cf7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
@@ -67,23 +67,19 @@ public class Reference {
bottom
}
- /**
- * n * @return A {@link Reference} that points at top half of a an hfile
- */
+ /** Returns a {@link Reference} that points at the top half of an hfile */
public static Reference createTopReference(final byte[] splitRow) {
return new Reference(splitRow, Range.top);
}
- /**
- * n * @return A {@link Reference} that points at the bottom half of a an hfile
- */
+ /** Returns a {@link Reference} that points at the bottom half of an hfile */
public static Reference createBottomReference(final byte[] splitRow) {
return new Reference(splitRow, Range.bottom);
}
/**
* Constructor
- * @param splitRow This is row we are splitting around. n
+ * @param splitRow This is the row we are splitting around.
*/
Reference(final byte[] splitRow, final Range fr) {
this.splitkey = splitRow == null ? null : KeyValueUtil.createFirstOnRow(splitRow).getKey();
@@ -102,15 +98,13 @@ public class Reference {
}
- /**
- * n
- */
+ /** Returns the file region. */
public Range getFileRegion() {
return this.region;
}
- /**
- * n
- */
+ /** Returns the split key. */
public byte[] getSplitKey() {
return splitkey;
}
@@ -151,7 +145,8 @@ public class Reference {
}
/**
- * Read a Reference from FileSystem. nn * @return New Reference made from passed <code>p</code> n
+ * Read a Reference from FileSystem.
+ * @return New Reference made from passed <code>p</code>
*/
public static Reference read(final FileSystem fs, final Path p) throws IOException {
InputStream in = fs.open(p);
@@ -198,7 +193,7 @@ public class Reference {
/**
* Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the
* delimiter, pb reads to EOF which may not be what you want).
- * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n
+ * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
*/
byte[] toByteArray() throws IOException {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
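For orientation, the factories above are how a split names the two halves of a parent hfile without copying data (row value illustrative):

import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;

public class ReferenceSketch {
  public static void main(String[] args) {
    byte[] splitRow = Bytes.toBytes("row-5000");
    // Each daughter region stores one of these instead of rewriting hfile data.
    Reference top = Reference.createTopReference(splitRow);       // keys >= splitRow
    Reference bottom = Reference.createBottomReference(splitRow); // keys < splitRow
    System.out.println(top.getFileRegion() + " / " + bottom.getFileRegion());
  }
}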
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 8419ccb6c1c..4e795ec75e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -95,7 +95,7 @@ public interface BlockCache extends Iterable<CachedBlock> {
int evictBlocksByHfileName(String hfileName);
/**
- * Get the statistics for this block cache. n
+ * Get the statistics for this block cache.
*/
CacheStats getStats();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index daa49d26a23..e6a4b609bc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -74,9 +74,7 @@ public class BlockCacheUtil {
}
}).setPrettyPrinting().create();
- /**
- * n * @return The block content as String.
- */
+ /** Returns the block content as a String. */
public static String toString(final CachedBlock cb, final long now) {
return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now);
}
@@ -142,9 +140,7 @@ public class BlockCacheUtil {
return GSON.toJson(bc);
}
- /**
- * n * @return The block content of <code>bc</code> as a String minus the filename.
- */
+ /** Returns the block content of <code>cb</code> as a String minus the filename. */
public static String toStringMinusFileName(final CachedBlock cb, final long now) {
return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age="
+ (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority="
@@ -281,9 +277,7 @@ public class BlockCacheUtil {
new ConcurrentSkipListMap<>();
FastLongHistogram hist = new FastLongHistogram();
- /**
- * n * @return True if full.... if we won't be adding any more.
- */
+ /** Returns true if full, i.e. if we won't be adding any more. */
public boolean update(final CachedBlock cb) {
if (isFull()) return true;
NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
index a90e04fe5ad..1b2fdc64197 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
@@ -52,7 +52,7 @@ public interface BlockCompressedSizePredicator {
/**
* Decides if the block should be finished based on the comparison of its uncompressed size
* against an adjusted size based on a predicated compression factor.
- * @param uncompressed true if the block should be finished. n
+ * @param uncompressed the uncompressed size of the block
+ * @return true if the block should be finished
*/
boolean shouldFinishBlock(int uncompressed);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 27f75e4eee6..ff796b2d4f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -202,7 +202,7 @@ public class CacheConfig {
}
/**
- * Constructs a cache configuration copied from the specified configuration. n
+ * Constructs a cache configuration copied from the specified configuration.
*/
public CacheConfig(CacheConfig cacheConf) {
this.cacheDataOnRead = cacheConf.cacheDataOnRead;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
index 2fe50381b77..4e5dfe34df6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
@@ -31,7 +31,7 @@ public interface CacheableDeserializer<T extends Cacheable> {
/**
* @param b ByteBuff to deserialize the Cacheable.
* @param allocator to manage NIO ByteBuffers for future allocation or de-allocation.
- * @return T the deserialized object. n
+ * @return T the deserialized object.
*/
T deserialize(ByteBuff b, ByteBuffAllocator allocator) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
index 2241a158efb..bb253e050fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
@@ -83,9 +83,9 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
private BloomType bloomType;
/**
- * n * each chunk's size in bytes. The real chunk size might be different as required by the fold
- * factor. n * target false positive rate n * hash function type to use n * maximum degree of
- * folding allowed n * the bloom type
+ * @param chunkByteSizeHint each chunk's size in bytes. The real chunk size might be different as
+ * required by the fold factor
+ * @param errorRate target false positive rate
+ * @param hashType hash function type to use
+ * @param maxFold maximum degree of folding allowed
+ * @param bloomType the bloom type
*/
public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType,
int maxFold, boolean cacheOnWrite, CellComparator comparator, BloomType bloomType) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 6e72890be12..b5a5095c336 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -138,7 +138,7 @@ public class HFileBlockIndex {
}
/**
- * n * from 0 to {@link #getRootBlockCount() - 1}
+ * @param i from 0 to {@link #getRootBlockCount()} - 1
*/
public byte[] getRootBlockKey(int i) {
return blockKeys[i];
@@ -256,7 +256,7 @@ public class HFileBlockIndex {
}
/**
- * n * from 0 to {@link #getRootBlockCount() - 1}
+ * @param i from 0 to {@link #getRootBlockCount()} - 1
*/
public Cell getRootBlockKey(int i) {
return blockKeys[i];
@@ -521,7 +521,7 @@ public class HFileBlockIndex {
}
/**
- * n * from 0 to {@link #getRootBlockCount() - 1}
+ * @param i from 0 to {@link #getRootBlockCount()} - 1
*/
public Cell getRootBlockKey(int i) {
return seeker.getRootBlockKey(i);
@@ -600,12 +600,12 @@ public class HFileBlockIndex {
/**
* Return the data block which contains this key. This function will only be called when the
* HFile version is larger than 1.
- * @param key the key we are looking for
- * @param currentBlock the current block, to avoid re-reading the same block nnn * @param
- * expectedDataBlockEncoding the data block encoding the caller is expecting
- * the data block to be in, or null to not perform this check and return the
- * block irrespective of the encoding
- * @return reader a basic way to load blocks n
+ * @param key the key we are looking for
+ * @param currentBlock the current block, to avoid re-reading the same block
+ * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data
+ * block to be in, or null to not perform this check and return
+ * the block irrespective of the encoding
+ * @return reader a basic way to load blocks
*/
public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks,
boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding,
@@ -629,7 +629,7 @@ public class HFileBlockIndex {
* block to be in, or null to not perform this check and return
* the block irrespective of the encoding.
* @return the BlockWithScanInfo which contains the DataBlock with other scan info such as
- * nextIndexedKey. n
+ * nextIndexedKey.
*/
public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
boolean cacheBlocks, boolean pread, boolean isCompaction,
@@ -665,8 +665,8 @@ public class HFileBlockIndex {
}
/**
- * Finds the root-level index block containing the given key. n * Key to find n * the comparator
- * to be used
+ * Finds the root-level index block containing the given key.
+ * @param key Key to find
+ * @param comp the comparator to be used
* @return Offset of block containing <code>key</code> (between 0 and the number of blocks - 1)
* or -1 if this file does not contain the request.
*/
@@ -677,7 +677,7 @@ public class HFileBlockIndex {
CellComparator comp);
/**
- * Finds the root-level index block containing the given key. n * Key to find
+ * Finds the root-level index block containing the given key.
+ * @param key Key to find
* @return Offset of block containing <code>key</code> (between 0 and the number of blocks - 1)
* or -1 if this file does not contain the request.
*/
@@ -690,13 +690,13 @@ public class HFileBlockIndex {
}
/**
- * Finds the root-level index block containing the given key. n * Key to find
+ * Finds the root-level index block containing the given key.
+ * @param key Key to find
*/
public abstract int rootBlockContainingKey(final Cell key);
/**
- * The indexed key at the ith position in the nonRootIndex. The position starts at 0. n * @param
- * i the ith position
+ * The indexed key at the ith position in the nonRootIndex. The position starts at 0.
+ * @param i the ith position
* @return The indexed key at the ith position in the nonRootIndex.
*/
protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
@@ -728,11 +728,11 @@ public class HFileBlockIndex {
/**
* Performs a binary search over a non-root level index block. Utilizes the secondary index,
- * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries. n * the
- * key we are searching for offsets to individual entries in the blockIndex buffer n * the
- * non-root index block buffer, starting with the secondary index. The position is ignored.
+ * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries.
+ * @param key the key we are searching for
+ * @param nonRootIndex the non-root index block buffer, with offsets to individual entries,
+ * starting with the secondary index. The position is ignored.
* @return the index i in [0, numEntries - 1] such that keys[i] <= key < keys[i + 1], if keys is
- * the array of all keys being searched, or -1 otherwise n
+ * the array of all keys being searched, or -1 otherwise
*/
static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex,
CellComparator comparator) {
@@ -809,8 +809,8 @@ public class HFileBlockIndex {
/**
* Search for one key using the secondary index in a non-root block. In case of success,
* positions the provided buffer at the entry of interest, where the file offset and the
- * on-disk-size can be read. n * a non-root block without header. Initial position does not
- * matter. n * the byte array containing the key
+ * on-disk-size can be read.
+ * @param nonRootBlock a non-root block without header. Initial position does not matter.
+ * @param key the byte array containing the key
* @return the index position where the given key was found, otherwise return -1 in the case the
* given key is before the first key.
*/
@@ -838,7 +838,7 @@ public class HFileBlockIndex {
* the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset
* that function returned.
* @param in the buffered input stream or wrapped byte input stream
- * @param numEntries the number of root-level index entries n
+ * @param numEntries the number of root-level index entries
*/
public void readRootIndex(DataInput in, final int numEntries) throws IOException {
blockOffsets = new long[numEntries];
@@ -866,7 +866,7 @@ public class HFileBlockIndex {
* that function returned.
* @param blk the HFile block
* @param numEntries the number of root-level index entries
- * @return the buffered input stream or wrapped byte input stream n
+ * @return the buffered input stream or wrapped byte input stream
*/
public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException {
DataInputStream in = blk.getByteStream();
@@ -879,7 +879,7 @@ public class HFileBlockIndex {
* {@link #readRootIndex(DataInput, int)}, but also reads metadata necessary to compute the
* mid-key in a multi-level index.
* @param blk the HFile block
- * @param numEntries the number of root-level index entries n
+ * @param numEntries the number of root-level index entries
*/
public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException {
DataInputStream in = readRootIndex(blk, numEntries);
@@ -1040,7 +1040,7 @@ public class HFileBlockIndex {
* there is no inline block index anymore, so we only write that level of block index to disk as
* the root level.
* @param out FSDataOutputStream
- * @return position at which we entered the root-level index. n
+ * @return position at which we entered the root-level index.
*/
public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
@@ -1100,7 +1100,7 @@ public class HFileBlockIndex {
* Writes the block index data as a single level only. Does not do any block framing.
* @param out the buffered output stream to write the index to. Typically a stream
* writing into an {@link HFile} block.
- * @param description a short description of the index being written. Used in a log message. n
+ * @param description a short description of the index being written. Used in a log message.
*/
public void writeSingleLevelIndex(DataOutput out, String description) throws IOException {
expectNumLevels(1);
@@ -1123,10 +1123,11 @@ public class HFileBlockIndex {
/**
* Split the current level of the block index into intermediate index blocks of permitted size
* and write those blocks to disk. Return the next level of the block index referencing those
- * intermediate-level blocks. n * @param currentLevel the current level of the block index, such
- * as the a chunk referencing all leaf-level index blocks
+ * intermediate-level blocks.
+ * @param currentLevel the current level of the block index, such as a chunk referencing all
+ * leaf-level index blocks
* @return the parent level block index, which becomes the root index after a few (usually zero)
- * iterations n
+ * iterations
*/
private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out,
BlockIndexChunk currentLevel) throws IOException {
@@ -1245,7 +1246,7 @@ public class HFileBlockIndex {
/**
* Write out the current inline index block. Inline blocks are non-root blocks, so the non-root
- * index format is used. n
+ * index format is used.
*/
@Override
public void writeInlineBlock(DataOutput out) throws IOException {
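The rootBlockContainingKey contract above (the greatest i with keys[i] <= key, or -1 when the key precedes the first block) is a plain lower-bound binary search; a generic sketch of just that contract, not HBase code:

import java.util.Comparator;

public class IndexSearchSketch {
  // Returns the greatest i such that keys[i] <= key, or -1 if key < keys[0].
  static <K> int blockContainingKey(K[] keys, K key, Comparator<K> cmp) {
    int lo = 0, hi = keys.length - 1, found = -1;
    while (lo <= hi) {
      int mid = (lo + hi) >>> 1;
      if (cmp.compare(keys[mid], key) <= 0) {
        found = mid;  // candidate block; a later one might still qualify
        lo = mid + 1;
      } else {
        hi = mid - 1;
      }
    }
    return found;
  }

  public static void main(String[] args) {
    Integer[] firstKeys = { 0, 10, 20, 30 };
    System.out.println(blockContainingKey(firstKeys, 25, Comparator.naturalOrder())); // 2
    System.out.println(blockContainingKey(firstKeys, -5, Comparator.naturalOrder())); // -1
  }
}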
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
index cb2d5bbcfb6..1629536c148 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
@@ -39,20 +39,20 @@ public interface HFileDataBlockEncoder {
/**
* Starts encoding for a block of KeyValues. Call
* {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} to
- * finish encoding of a block. nnn
+ * finish encoding of a block.
*/
void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out)
throws IOException;
/**
- * Encodes a KeyValue. nnnn
+ * Encodes a KeyValue.
*/
void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
throws IOException;
/**
* Ends encoding for a block of KeyValues. Gives a chance for the encoder to do the finishing
- * stuff for the encoded block. It must be called at the end of block encoding. nnnnn
+ * stuff for the encoded block. It must be called at the end of block encoding.
*/
void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 1caca6abf4e..df58c94464a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -516,8 +516,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
* Within a loaded block, seek looking for the last key that is smaller than (or equal to?) the
* key we are interested in. A note on the seekBefore: if you have seekBefore = true, AND the
* first key in the block = key, then you'll get thrown exceptions. The caller has to check for
- * that case and load the previous block as appropriate. n * the key to find n * find the key
- * before the given key in case of exact match.
+ * that case and load the previous block as appropriate.
+ * @param key the key to find
+ * @param seekBefore find the key before the given key in case of exact match
* @return 0 in case of an exact key match, 1 in case of an inexact match, -2 in case of an
* inexact match and furthermore, the input key less than the first key of current
* block(e.g. using a faked index key)
@@ -1641,10 +1641,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
/**
* Create a Scanner on this file. No seeks or reads are done on creation. Call
* {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up
- * in a Scanner. Letting go of your references to the scanner is sufficient. n * Store
- * configuration. n * True if we should cache blocks read in by this scanner. n * Use positional
- * read rather than seek+read if true (pread is better for random reads, seek+read is better
- * scanning). n * is scanner being used for a compaction?
+ * in a Scanner. Letting go of your references to the scanner is sufficient.
+ * @param conf Store configuration.
+ * @param cacheBlocks True if we should cache blocks read in by this scanner.
+ * @param pread Use positional read rather than seek+read if true (pread is better for random
+ * reads, seek+read is better scanning).
+ * @param isCompaction is scanner being used for a compaction?
* @return Scanner on this file.
*/
@Override
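The four values described above correspond to the scanner factory's parameters. A hedged usage sketch (the reader instance and the chosen flag values are assumptions):
    // Sketch: obtain a scanner; no seeks or reads happen until seekTo is called.
    HFileScanner scanner = reader.getScanner(conf,
      true,   // cacheBlocks: cache blocks read in by this scanner
      false,  // pread: positional read is better for random reads
      false); // isCompaction: this scanner is not used for a compaction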
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
index e77b133523f..fd5c66b126b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
@@ -39,12 +39,13 @@ public interface HFileScanner extends Shipper, Closeable {
/**
* SeekTo or just before the passed <code>cell</code>. Examine the return code to figure whether
* we found the cell or not. Consider the cell stream of all the cells in the file,
- * <code>c[0] .. c[n]</code>, where there are n cells in the file. n * @return -1, if cell <
- * c[0], no position; 0, such that c[i] = cell and scanner is left in position i; and 1, such that
- * c[i] < cell, and scanner is left in position i. The scanner will position itself between
- * c[i] and c[i+1] where c[i] < cell <= c[i+1]. If there is no cell c[i+1] greater than or
- * equal to the input cell, then the scanner will position itself at the end of the file and
- * next() will return false when it is called. n
+ * <code>c[0] .. c[n]</code>, where there are n cells in the file.
+ * @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in
+ * position i; and 1, such that c[i] < cell, and scanner is left in position i. The
+ * scanner will position itself between c[i] and c[i+1] where c[i] < cell <= c[i+1].
+ * If there is no cell c[i+1] greater than or equal to the input cell, then the scanner
+ * will position itself at the end of the file and next() will return false when it is
+ * called.
*/
int seekTo(Cell cell) throws IOException;
@@ -59,7 +60,7 @@ public interface HFileScanner extends Shipper, Closeable {
* false when it is called.
* @param cell Cell to find (should be non-null)
* @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in
- * position i; and 1, such that c[i] < cell, and scanner is left in position i. n
+ * position i; and 1, such that c[i] < cell, and scanner is left in position i.
*/
int reseekTo(Cell cell) throws IOException;
@@ -69,20 +70,20 @@ public interface HFileScanner extends Shipper, Closeable {
* @param cell Cell to find
* @return false if cell <= c[0] or true with scanner in position 'i' such that: c[i] <
* cell. Furthermore: there may be a c[i+1], such that c[i] < cell <= c[i+1] but
- * there may also NOT be a c[i+1], and next() will return false (EOF). n
+ * there may also NOT be a c[i+1], and next() will return false (EOF).
*/
boolean seekBefore(Cell cell) throws IOException;
/**
* Positions this scanner at the start of the file.
* @return False if empty file; i.e. a call to next would return false and the current key and
- * value are undefined. n
+ * value are undefined.
*/
boolean seekTo() throws IOException;
/**
* Scans to the next entry in the file.
- * @return Returns false if you are at the end otherwise true if more in file. n
+ * @return false if you are at the end, otherwise true if there is more in the file.
*/
boolean next() throws IOException;
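The seekTo contract above is easiest to read from the caller's side. A hedged sketch of interpreting the return code (scanner and cell are assumed to exist):
    // Sketch: position the scanner relative to the cell stream c[0] .. c[n].
    int pos = scanner.seekTo(cell);
    if (pos == -1) {
      // cell < c[0]: the scanner has no position
    } else if (pos == 0) {
      // exact match: the scanner is positioned at the cell
    } else { // pos == 1
      // the scanner rests on the last c[i] < cell; next() advances to c[i+1]
      // if it exists, otherwise it returns false (end of file)
    }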
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index b33d471ae49..eda5cde46a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -574,8 +574,8 @@ public class HFileWriterImpl implements HFile.Writer {
* Add a meta block to the end of the file. Call before close(). Metadata blocks are expensive.
* Fill one with a bunch of serialized data rather than do a metadata block per metadata instance.
* If metadata is small, consider adding to file info using
- * {@link #appendFileInfo(byte[], byte[])} n * name of the block n * will call readFields to get
- * data later (DO NOT REUSE)
+ * {@link #appendFileInfo(byte[], byte[])}. Takes the name of the block, and content that will
+ * call readFields to get data later (DO NOT REUSE)
*/
@Override
public void appendMetaBlock(String metaBlockName, Writable content) {
@@ -723,7 +723,7 @@ public class HFileWriterImpl implements HFile.Writer {
/**
* Add key/value to file. Keys must be added in an order that agrees with the Comparator passed on
- * construction. n * Cell to add. Cannot be empty nor null.
+ * construction. Takes the Cell to add, which cannot be empty nor null.
*/
@Override
public void append(final Cell cell) throws IOException {
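Together the two hunks above describe the writer protocol: cells appended in comparator order, meta blocks batched and added before close. A hedged sketch (the writer, cell source and Writable are assumptions):
    // Sketch: keys must be added in an order that agrees with the comparator.
    for (Cell cell : sortedCells) {
      writer.append(cell);
    }
    // Meta blocks are expensive; fill one with batched serialized data.
    writer.appendMetaBlock("EXAMPLE_META", exampleWritable); // illustrative name
    writer.close();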
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
index 6b0c913ca1b..58229639309 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
@@ -30,16 +30,15 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface InlineBlockWriter {
/**
- * Determines whether there is a new block to be written out. n * whether the file is being
- * closed, in which case we need to write out all available data and not wait to accumulate
- * another block
+ * Determines whether there is a new block to be written out. The flag indicates whether the file is
+ * being closed, in which case we need to write out all available data and not wait for another block
*/
boolean shouldWriteBlock(boolean closing);
/**
* Writes the block to the provided stream. Must not write any magic records. Called only if
- * {@link #shouldWriteBlock(boolean)} returned true. n * a stream (usually a compressing stream)
- * to write the block to
+ * {@link #shouldWriteBlock(boolean)} returned true. Takes a stream (usually a compressing
+ * stream) to write the block to
*/
void writeInlineBlock(DataOutput out) throws IOException;
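A hedged sketch of how a caller might drive this contract when the file is closing (ibw and out are illustrative names):
    // Sketch: flush any pending inline block before the file closes.
    boolean closing = true;
    if (ibw.shouldWriteBlock(closing)) {
      ibw.writeInlineBlock(out); // must not write any magic records
    }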
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
index 18d21deceb2..9e480247ee9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
@@ -66,7 +66,7 @@ public class NoOpIndexBlockEncoder implements HFileIndexBlockEncoder {
/**
* Writes the block index chunk in the non-root index block format. This format contains the
* number of entries, an index of integer offsets for quick binary search on variable-length
- * records, and tuples of block offset, on-disk block size, and the first key for each entry. nn
+ * records, and tuples of block offset, on-disk block size, and the first key for each entry.
*/
private void writeNonRoot(BlockIndexChunk blockIndexChunk, DataOutput out) throws IOException {
// The number of entries in the block.
@@ -103,7 +103,7 @@ public class NoOpIndexBlockEncoder implements HFileIndexBlockEncoder {
* similar to the {@link HFile} version 1 block index format, except that we store on-disk size of
* the block instead of its uncompressed size.
* @param out the data output stream to write the block index to. Typically a stream writing into
- * an {@link HFile} block. n
+ * an {@link HFile} block.
*/
private void writeRoot(BlockIndexChunk blockIndexChunk, DataOutput out) throws IOException {
for (int i = 0; i < blockIndexChunk.getNumEntries(); ++i) {
@@ -443,8 +443,8 @@ public class NoOpIndexBlockEncoder implements HFileIndexBlockEncoder {
}
/**
- * The indexed key at the ith position in the nonRootIndex. The position starts at 0. n * @param
- * i the ith position
+ * The indexed key at the ith position in the nonRootIndex. The position starts at 0.
+ * @param i the ith position
* @return The indexed key at the ith position in the nonRootIndex.
*/
protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
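The non-root format described above has three sections. A hedged sketch of the layout, not the exact serialization code (the field names and secondary-index bookkeeping are assumptions):
    // Sketch: entry count, offsets for binary search, then per-entry tuples.
    out.writeInt(numEntries);
    for (int entryOffset : secondaryIndexOffsets) {
      out.writeInt(entryOffset); // integer offsets over variable-length records
    }
    for (IndexEntry e : entries) {
      out.writeLong(e.blockOffset);  // block offset
      out.writeInt(e.onDiskSize);    // on-disk block size
      out.write(e.firstKey);         // first key of the entry
    }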
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java
index be0ee3bb9a7..c308874951e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java
@@ -50,7 +50,7 @@ public class PreviousBlockCompressionRatePredicator implements BlockCompressedSi
/**
* Returns <b>true</b> if the passed uncompressed size is larger than the limit calculated by
* <code>updateLatestBlockSizes</code>.
- * @param uncompressed true if the block should be finished. n
+ * @param uncompressed the uncompressed size of the block.
*/
@Override
public boolean shouldFinishBlock(int uncompressed) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java
index c259375a97d..cf994ad51ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java
@@ -39,7 +39,7 @@ public class UncompressedBlockSizePredicator implements BlockCompressedSizePredi
/**
* Dummy implementation that always returns true. This means, we will be only considering the
* block uncompressed size for deciding when to finish a block.
- * @param uncompressed true if the block should be finished. n
+ * @param uncompressed the uncompressed size of the block.
*/
@Override
public boolean shouldFinishBlock(int uncompressed) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index 54032e79c6f..0b03656d701 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -348,7 +348,7 @@ public final class BucketAllocator {
* @param availableSpace capacity of cache
* @param map A map stores the block key and BucketEntry(block's meta data like offset,
* length)
- * @param realCacheSize cached data size statistics for bucket cache n
+ * @param realCacheSize cached data size statistics for bucket cache
*/
BucketAllocator(long availableSpace, int[] bucketSizes, Map<BlockCacheKey, BucketEntry> map,
LongAdder realCacheSize) throws BucketAllocatorException {
@@ -444,7 +444,8 @@ public final class BucketAllocator {
/**
* Allocate a block with specified size. Return the offset
- * @param blockSize size of block nn * @return the offset in the IOEngine
+ * @param blockSize size of block
+ * @return the offset in the IOEngine
*/
public synchronized long allocateBlock(int blockSize)
throws CacheFullException, BucketAllocatorException {
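A hedged sketch of calling the allocator (the allocator instance, block size and the eviction remark are assumptions):
    // Sketch: allocate a block and handle a full cache.
    try {
      long offset = allocator.allocateBlock(blockSize); // offset in the IOEngine
      // write the block's bytes at 'offset' through the IOEngine
    } catch (CacheFullException e) {
      // no bucket can currently hold the block; callers typically evict and retry
    }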
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 18295f285c4..6849b176f72 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -384,7 +384,8 @@ public class BucketCache implements BlockCache, HeapSize {
}
/**
- * Get the IOEngine from the IO engine name nnn * @return the IOEngine n
+ * Get the IOEngine from the IO engine name
+ * @return the IOEngine
*/
private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String persistencePath)
throws IOException {
@@ -1581,7 +1582,7 @@ public class BucketCache implements BlockCache, HeapSize {
}
/**
- * Only used in test n
+ * Only used in test
*/
void stopWriterThreads() throws InterruptedException {
for (WriterThread writerThread : writerThreads) {
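getIOEngineFromName resolves the engine from a configured name. A hedged configuration sketch; the key names and accepted values are drawn from HBase's BucketCache documentation, not from this patch:
    // Sketch: select a file-backed IO engine for the bucket cache.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.bucketcache.ioengine", "file:/mnt/cache/bucket.data"); // or "offheap"
    conf.setInt("hbase.bucketcache.size", 4096); // capacity in MB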
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
index 78166e88ffd..6dc3742a660 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
@@ -64,8 +64,8 @@ public class ByteBufferIOEngine implements IOEngine {
private final long capacity;
/**
- * Construct the ByteBufferIOEngine with the given capacity n * @throws IOException ideally here
- * no exception to be thrown from the allocator
+ * Construct the ByteBufferIOEngine with the given capacity
+ * @throws IOException ideally no exception should be thrown from the allocator here
*/
public ByteBufferIOEngine(long capacity) throws IOException {
this.capacity = capacity;
@@ -80,7 +80,7 @@ public class ByteBufferIOEngine implements IOEngine {
}
/**
- * Memory IO engine is always unable to support persistent storage for the cache n
+ * Memory IO engine is always unable to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index 511d8afff46..370343b1b25 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -110,7 +110,7 @@ public class FileIOEngine extends PersistentIOEngine {
}
/**
- * File IO engine is always able to support persistent storage for the cache n
+ * File IO engine is always able to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
@@ -162,7 +162,7 @@ public class FileIOEngine extends PersistentIOEngine {
/**
* Transfers data from the given byte buffer to file
* @param srcBuffer the given byte buffer from which bytes are to be read
- * @param offset The offset in the file where the first byte to be written n
+ * @param offset The offset in the file where the first byte is to be written
*/
@Override
public void write(ByteBuffer srcBuffer, long offset) throws IOException {
@@ -170,7 +170,7 @@ public class FileIOEngine extends PersistentIOEngine {
}
/**
- * Sync the data to file after writing n
+ * Sync the data to file after writing
*/
@Override
public void sync() throws IOException {
@@ -254,8 +254,8 @@ public class FileIOEngine extends PersistentIOEngine {
}
/**
- * Get the absolute offset in given file with the relative global offset. nn * @return the
- * absolute offset
+ * Get the absolute offset in the given file for the relative global offset.
+ * @return the absolute offset
*/
private long getAbsoluteOffsetInFile(int fileNum, long globalOffset) {
return globalOffset - fileNum * sizePerFile;
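The offset arithmetic above is worth a worked example, assuming sizePerFile = 100:
    // Sketch: global offset 250 falls in file 2, so the absolute offset in
    // that file is 250 - 2 * 100 = 50.
    long sizePerFile = 100L;
    int fileNum = 2;
    long globalOffset = 250L;
    long absolute = globalOffset - fileNum * sizePerFile; // 50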
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
index b09e0963ca2..b7066f149fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
@@ -95,7 +95,7 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine {
}
/**
- * File IO engine is always able to support persistent storage for the cache n
+ * File IO engine is always able to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
@@ -109,7 +109,7 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine {
/**
* Transfers data from the given byte buffer to file
* @param srcBuffer the given byte buffer from which bytes are to be read
- * @param offset The offset in the file where the first byte to be written n
+ * @param offset The offset in the file where the first byte is to be written
*/
@Override
public void write(ByteBuffer srcBuffer, long offset) throws IOException {
@@ -122,7 +122,7 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine {
}
/**
- * Sync the data to file after writing n
+ * Sync the data to file after writing
*/
@Override
public void sync() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
index a7b73a5d886..46112da3f2c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
@@ -52,19 +52,19 @@ public interface IOEngine {
/**
* Transfers data from the given byte buffer to IOEngine
* @param srcBuffer the given byte buffer from which bytes are to be read
- * @param offset The offset in the IO engine where the first byte to be written n
+ * @param offset The offset in the IO engine where the first byte is to be written
*/
void write(ByteBuffer srcBuffer, long offset) throws IOException;
/**
* Transfers the data from the given MultiByteBuffer to IOEngine
* @param srcBuffer the given MultiBytebufffers from which bytes are to be read
- * @param offset the offset in the IO engine where the first byte to be written n
+ * @param offset the offset in the IO engine where the first byte is to be written
*/
void write(ByteBuff srcBuffer, long offset) throws IOException;
/**
- * Sync the data to IOEngine after writing n
+ * Sync the data to IOEngine after writing
*/
void sync() throws IOException;
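The write/sync pair above implies the usual durability pattern. A hedged sketch (the engine instance, payload and offset are assumptions):
    // Sketch: write bytes into the IO engine, then sync if it is persistent.
    ByteBuffer src = ByteBuffer.wrap(payload);
    engine.write(src, offset);
    if (engine.isPersistent()) {
      engine.sync(); // flush written data to the backing store
    }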
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
index 85bcc643558..bf014dfb530 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
@@ -77,7 +77,7 @@ public class MemorySizeUtil {
/**
* Checks whether we have enough heap memory left out after portion for Memstore and Block cache.
- * We need atleast 20% of heap left out for other RS functions. n
+ * We need at least 20% of heap left out for other RS functions.
*/
public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) {
if (conf.get(MEMSTORE_SIZE_OLD_KEY) != null) {
@@ -102,7 +102,7 @@ public class MemorySizeUtil {
}
/**
- * Retrieve global memstore configured size as percentage of total heap. nn
+ * Retrieve global memstore configured size as percentage of total heap.
*/
public static float getGlobalMemStoreHeapPercent(final Configuration c,
final boolean logInvalid) {
@@ -178,7 +178,8 @@ public class MemorySizeUtil {
/**
* Returns the onheap global memstore limit based on the config
- * 'hbase.regionserver.global.memstore.size'. n * @return the onheap global memstore limt
+ * 'hbase.regionserver.global.memstore.size'.
+ * @return the onheap global memstore limit
*/
public static long getOnheapGlobalMemStoreSize(Configuration conf) {
long max = -1L;
@@ -191,7 +192,7 @@ public class MemorySizeUtil {
}
/**
- * Retrieve configured size for on heap block cache as percentage of total heap. n
+ * Retrieve configured size for on heap block cache as percentage of total heap.
*/
public static float getBlockCacheHeapPercent(final Configuration conf) {
// L1 block cache is always on heap
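A hedged sketch of querying the limits these helpers compute (the HBaseConfiguration usage is an assumption):
    // Sketch: validate and read the configured memory limits.
    Configuration conf = HBaseConfiguration.create();
    MemorySizeUtil.checkForClusterFreeHeapMemoryLimit(conf); // fails if < 20% of heap remains
    long memstoreBytes = MemorySizeUtil.getOnheapGlobalMemStoreSize(conf);
    float blockCachePct = MemorySizeUtil.getBlockCacheHeapPercent(conf);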
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
index ef19dea2dfb..2c9fb0b2a2e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
@@ -34,13 +34,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader
public interface PriorityFunction {
/**
* Returns the 'priority type' of the specified request. The returned value is mainly used to
- * select the dispatch queue. nnn * @return Priority of this request.
+ * select the dispatch queue.
+ * @return Priority of this request.
*/
int getPriority(RequestHeader header, Message param, User user);
/**
* Returns the deadline of the specified request. The returned value is used to sort the dispatch
- * queue. nn * @return Deadline of this request. 0 now, otherwise msec of 'delay'
+ * queue.
+ * @return Deadline of this request. 0 means now, otherwise msec of 'delay'
*/
long getDeadline(RequestHeader header, Message param);
}
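A hedged sketch of implementing the interface; the priority values and the method-name test are illustrative, not HBase's actual scheme:
    // Sketch: pick a dispatch queue via priority, sort within it via deadline.
    PriorityFunction fn = new PriorityFunction() {
      @Override
      public int getPriority(RequestHeader header, Message param, User user) {
        return "Scan".equals(header.getMethodName()) ? 0 : 100; // illustrative
      }
      @Override
      public long getDeadline(RequestHeader header, Message param) {
        return 0; // 0 means "now"; larger values sort later in the queue
      }
    };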
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
index 59008cb08a4..197ddb71d7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
@@ -96,7 +96,7 @@ public interface RpcCall extends RpcCallContext {
/**
* Send the response of this RPC call. Implementation provides the underlying facility
- * (connection, etc) to send. n
+ * (connection, etc) to send.
*/
void sendResponseIfReady() throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
index 95843652abe..479a83f914a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
@@ -68,7 +68,7 @@ public interface RpcCallContext {
/**
* Sets a callback which has to be executed at the end of this RPC call. Such a callback is an
- * optional one for any Rpc call. n
+ * optional one for any Rpc call.
*/
void setCallBack(RpcCallback callback);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java
index bab3e80d322..d8605bb122e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java
@@ -25,8 +25,7 @@ class RpcSchedulerContext extends RpcScheduler.Context {
private final RpcServer rpcServer;
/**
- * n
- */
+ * */
RpcSchedulerContext(final RpcServer rpcServer) {
this.rpcServer = rpcServer;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 15caac476f3..6e4b5ef42f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -255,11 +255,13 @@ public abstract class RpcServer implements RpcServerInterface, ConfigurationObse
/**
* Constructs a server listening on the named port and address.
- * @param server hosting instance of {@link Server}. We will do authentications if an
- * instance else pass null for no authentication check.
- * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
- * @param services A list of services.
- * @param bindAddress Where to listen nn * @param reservoirEnabled Enable ByteBufferPool or not.
+ * @param server hosting instance of {@link Server}. We will do authentications if an
+ * instance else pass null for no authentication check.
+ * @param name Used keying this rpc servers' metrics and for naming the Listener
+ * thread.
+ * @param services A list of services.
+ * @param bindAddress Where to listen
+ * @param reservoirEnabled Enable ByteBufferPool or not.
*/
public RpcServer(final Server server, final String name,
final List<BlockingServiceAndInterface> services, final InetSocketAddress bindAddress,
@@ -776,7 +778,6 @@ public abstract class RpcServer implements RpcServerInterface, ConfigurationObse
/**
* Returns the remote side ip address when invoked inside an RPC Returns null incase of an error.
- * n
*/
public static InetAddress getRemoteIp() {
RpcCall call = CurCall.get();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
index 80549067972..2c0dd1cc2b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
@@ -64,7 +64,7 @@ public interface RpcServerInterface {
void addCallSize(long diff);
/**
- * Refresh authentication manager policy. n
+ * Refresh authentication manager policy.
*/
void refreshAuthManager(Configuration conf, PolicyProvider pp);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index efb6630ad9e..b09f33c47f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -167,7 +167,7 @@ abstract class ServerRpcConnection implements Closeable {
}
/**
- * Set up cell block codecs n
+ * Set up cell block codecs
*/
private void setupCellBlockCodecs() throws FatalConnectionException {
// TODO: Plug in other supported decoders.
@@ -500,8 +500,8 @@ abstract class ServerRpcConnection implements Closeable {
protected abstract void doRespond(RpcResponse resp) throws IOException;
/**
- * n * Has the request header and the request param and optionally encoded data buffer all in this
- * one array.
+ * Has the request header and the request param and optionally encoded data buffer all in this one
+ * array.
* <p/>
* Will be overridden in tests.
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
index 78ff4bf69d1..92b38757031 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
@@ -57,10 +57,10 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
private Abortable abortable = null;
/**
- * n * @param handlerCount the number of handler threads that will be used to process calls
+ * @param handlerCount the number of handler threads that will be used to process calls
* @param priorityHandlerCount How many threads for priority handling.
- * @param replicationHandlerCount How many threads for replication handling. n * @param priority
- * Function to extract request priority.
+ * @param replicationHandlerCount How many threads for replication handling.
+ * @param priority Function to extract request priority.
*/
public SimpleRpcScheduler(Configuration conf, int handlerCount, int priorityHandlerCount,
int replicationHandlerCount, int metaTransitionHandler, PriorityFunction priority,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 49c861b14ff..cb157a592a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -354,11 +354,13 @@ public class SimpleRpcServer extends RpcServer {
/**
* Constructs a server listening on the named port and address.
- * @param server hosting instance of {@link Server}. We will do authentications if an
- * instance else pass null for no authentication check.
- * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
- * @param services A list of services.
- * @param bindAddress Where to listen nn * @param reservoirEnabled Enable ByteBufferPool or not.
+ * @param server hosting instance of {@link Server}. We will do authentications if an
+ * instance else pass null for no authentication check.
+ * @param name Used keying this rpc servers' metrics and for naming the Listener
+ * thread.
+ * @param services A list of services.
+ * @param bindAddress Where to listen
+ * @param reservoirEnabled Enable ByteBufferPool or not.
*/
public SimpleRpcServer(final Server server, final String name,
final List<BlockingServiceAndInterface> services, final InetSocketAddress bindAddress,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
index db1b380361d..cbf023e2ba9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
@@ -245,7 +245,7 @@ class SimpleRpcServerResponder extends Thread {
/**
* Process the response for this call. You need to have the lock on
* {@link org.apache.hadoop.hbase.ipc.SimpleServerRpcConnection#responseWriteLock}
- * @return true if we proceed the call fully, false otherwise. n
+ * @return true if we processed the call fully, false otherwise.
*/
private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp)
throws IOException {
@@ -283,8 +283,8 @@ class SimpleRpcServerResponder extends Thread {
/**
* Process all the responses for this connection
- * @return true if all the calls were processed or that someone else is doing it. false if there *
- * is still some work to do. In this case, we expect the caller to delay us. n
+ * @return true if all the calls were processed or that someone else is doing it. false if there
+ * is still some work to do. In this case, we expect the caller to delay us.
*/
private boolean processAllResponses(final SimpleServerRpcConnection connection)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
index 4c8925d7274..ac705d7a26f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
@@ -260,7 +260,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
/**
* Read off the wire. If there is not enough data to read, update the connection state with what
* we have and returns.
- * @return Returns -1 if failure (and caller will close connection), else zero or more. nn
+ * @return Returns -1 if failure (and caller will close connection), else zero or more.
*/
public int readAndProcess() throws IOException, InterruptedException {
// If we have not read the connection setup preamble, look to see if that is on the wire.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java
index 28795eab28e..41f5709e911 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java
@@ -64,7 +64,7 @@ public class DrainingServerTracker extends ZKListener {
/**
* Starts the tracking of draining RegionServers.
* <p>
- * All Draining RSs will be tracked after this method is called. n
+ * All Draining RSs will be tracked after this method is called.
*/
public void start() throws KeeperException, IOException {
watcher.registerListener(this);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c5c0a7cb7e5..7dc08d76aba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3261,7 +3261,8 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements Maste
}
/**
- * Utility for constructing an instance of the passed HMaster class. n * @return HMaster instance.
+ * Utility for constructing an instance of the passed HMaster class.
+ * @return HMaster instance.
*/
public static HMaster constructMaster(Class<? extends HMaster> masterClass,
final Configuration conf) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 6295fa63d50..493d0e3ef86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -813,7 +813,7 @@ public class MasterCoprocessorHost
/**
* Invoked just before calling the split region procedure
* @param tableName the table where the region belongs to
- * @param splitRow the split point n
+ * @param splitRow the split point
*/
public void preSplitRegion(final TableName tableName, final byte[] splitRow) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
@@ -828,7 +828,7 @@ public class MasterCoprocessorHost
* Invoked just before a split
* @param tableName the table where the region belongs to
* @param splitRow the split point
- * @param user the user n
+ * @param user the user
*/
public void preSplitRegionAction(final TableName tableName, final byte[] splitRow,
final User user) throws IOException {
@@ -844,7 +844,7 @@ public class MasterCoprocessorHost
* Invoked just after a split
* @param regionInfoA the new left-hand daughter region
* @param regionInfoB the new right-hand daughter region
- * @param user the user n
+ * @param user the user
*/
public void postCompletedSplitRegionAction(final RegionInfo regionInfoA,
final RegionInfo regionInfoB, final User user) throws IOException {
@@ -857,8 +857,8 @@ public class MasterCoprocessorHost
}
/**
- * This will be called before update META step as part of split table region procedure. nn
- * * @param user the user n
+ * This will be called before update META step as part of split table region procedure.
+ * @param user the user
*/
public void preSplitBeforeMETAAction(final byte[] splitKey, final List<Mutation> metaEntries,
final User user) throws IOException {
@@ -872,7 +872,7 @@ public class MasterCoprocessorHost
/**
* This will be called after update META step as part of split table region procedure.
- * @param user the user n
+ * @param user the user
*/
public void preSplitAfterMETAAction(final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@@ -885,7 +885,7 @@ public class MasterCoprocessorHost
/**
* Invoked just after the rollback of a failed split
- * @param user the user n
+ * @param user the user
*/
public void postRollBackSplitRegionAction(final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@@ -899,7 +899,7 @@ public class MasterCoprocessorHost
/**
* Invoked just before a merge
* @param regionsToMerge the regions to merge
- * @param user the user n
+ * @param user the user
*/
public void preMergeRegionsAction(final RegionInfo[] regionsToMerge, final User user)
throws IOException {
@@ -915,7 +915,7 @@ public class MasterCoprocessorHost
* Invoked after completing merge regions operation
* @param regionsToMerge the regions to merge
* @param mergedRegion the new merged region
- * @param user the user n
+ * @param user the user
*/
public void postCompletedMergeRegionsAction(final RegionInfo[] regionsToMerge,
final RegionInfo mergedRegion, final User user) throws IOException {
@@ -931,7 +931,7 @@ public class MasterCoprocessorHost
* Invoked before merge regions operation writes the new region to hbase:meta
* @param regionsToMerge the regions to merge
* @param metaEntries the meta entry
- * @param user the user n
+ * @param user the user
*/
public void preMergeRegionsCommit(final RegionInfo[] regionsToMerge,
final @MetaMutationAnnotation List<Mutation> metaEntries, final User user) throws IOException {
@@ -947,7 +947,7 @@ public class MasterCoprocessorHost
* Invoked after merge regions operation writes the new region to hbase:meta
* @param regionsToMerge the regions to merge
* @param mergedRegion the new merged region
- * @param user the user n
+ * @param user the user
*/
public void postMergeRegionsCommit(final RegionInfo[] regionsToMerge,
final RegionInfo mergedRegion, final User user) throws IOException {
@@ -962,7 +962,7 @@ public class MasterCoprocessorHost
/**
* Invoked after rollback merge regions operation
* @param regionsToMerge the regions to merge
- * @param user the user n
+ * @param user the user
*/
public void postRollBackMergeRegionsAction(final RegionInfo[] regionsToMerge, final User user)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index a5a0b5d629f..5a43cd98feb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -306,7 +306,7 @@ public class MasterFileSystem {
}
/**
- * Make sure the directories under rootDir have good permissions. Create if necessary. nn
+ * Make sure the directories under rootDir have good permissions. Create if necessary.
*/
private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
FileSystem fs = p.getFileSystem(conf);
@@ -335,7 +335,7 @@ public class MasterFileSystem {
/**
* Check permissions for bulk load staging directory. This directory has special hidden
- * permissions. Create it if necessary. n
+ * permissions. Create it if necessary.
*/
private void checkStagingDir() throws IOException {
Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 1ac42008df1..4a490b1e127 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1110,7 +1110,7 @@ public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
/**
* Get the number of regions of the table that have been updated by the alter.
* @return Pair indicating the number of regions updated Pair.getFirst is the regions that are yet
- * to be updated Pair.getSecond is the total number of regions of the table n
+ * to be updated; Pair.getSecond is the total number of regions of the table
*/
@Override
public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller,
@@ -1137,7 +1137,7 @@ public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
* Get list of TableDescriptors for requested tables.
* @param c Unused (set to null).
* @param req GetTableDescriptorsRequest that contains: - tableNames: requested tables, or if
- * empty, all are requested. nn
+ * empty, all are requested.
*/
@Override
public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
@@ -1174,7 +1174,7 @@ public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
/**
* Get list of userspace table names
* @param controller Unused (set to null).
- * @param req GetTableNamesRequest nn
+ * @param req GetTableNamesRequest
*/
@Override
public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index fbeb155a88b..c84a58b7771 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -116,7 +116,7 @@ public interface MasterServices extends Server {
/**
* Check table is modifiable; i.e. exists and is offline.
- * @param tableName Name of table to check. nnn
+ * @param tableName Name of table to check.
*/
// We actually throw the exceptions mentioned in the
void checkTableModifiable(final TableName tableName)
@@ -125,8 +125,8 @@ public interface MasterServices extends Server {
/**
* Create a table using the given table definition.
* @param desc The table definition
- * @param splitKeys Starting row keys for the initial table regions. If null nn * a single region
- * is created.
+ * @param splitKeys Starting row keys for the initial table regions. If null a single region is
+ * created.
*/
long createTable(final TableDescriptor desc, final byte[][] splitKeys, final long nonceGroup,
final long nonce) throws IOException;
@@ -139,7 +139,7 @@ public interface MasterServices extends Server {
/**
* Delete a table
- * @param tableName The table name nnn
+ * @param tableName The table name
*/
long deleteTable(final TableName tableName, final long nonceGroup, final long nonce)
throws IOException;
@@ -147,7 +147,7 @@ public interface MasterServices extends Server {
/**
* Truncate a table
* @param tableName The table name
- * @param preserveSplits True if the splits should be preserved nnn
+ * @param preserveSplits True if the splits should be preserved
*/
public long truncateTable(final TableName tableName, final boolean preserveSplits,
final long nonceGroup, final long nonce) throws IOException;
@@ -155,7 +155,7 @@ public interface MasterServices extends Server {
/**
* Modify the descriptor of an existing table
* @param tableName The table name
- * @param descriptor The updated table descriptor nnn
+ * @param descriptor The updated table descriptor
*/
long modifyTable(final TableName tableName, final TableDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException;
@@ -168,14 +168,14 @@ public interface MasterServices extends Server {
/**
* Enable an existing table
- * @param tableName The table name nnn
+ * @param tableName The table name
*/
long enableTable(final TableName tableName, final long nonceGroup, final long nonce)
throws IOException;
/**
* Disable an existing table
- * @param tableName The table name nnn
+ * @param tableName The table name
*/
long disableTable(final TableName tableName, final long nonceGroup, final long nonce)
throws IOException;
@@ -183,7 +183,7 @@ public interface MasterServices extends Server {
/**
* Add a new column to an existing table
* @param tableName The table name
- * @param column The column definition nnn
+ * @param column The column definition
*/
long addColumn(final TableName tableName, final ColumnFamilyDescriptor column,
final long nonceGroup, final long nonce) throws IOException;
@@ -191,7 +191,7 @@ public interface MasterServices extends Server {
/**
* Modify the column descriptor of an existing column in an existing table
* @param tableName The table name
- * @param descriptor The updated column definition nnn
+ * @param descriptor The updated column definition
*/
long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException;
@@ -205,7 +205,7 @@ public interface MasterServices extends Server {
/**
* Delete a column from an existing table
* @param tableName The table name
- * @param columnName The column name nnn
+ * @param columnName The column name
*/
long deleteColumn(final TableName tableName, final byte[] columnName, final long nonceGroup,
final long nonce) throws IOException;
@@ -216,7 +216,7 @@ public interface MasterServices extends Server {
* @param forcible whether to force to merge even two regions are not adjacent
* @param nonceGroup used to detect duplicate
* @param nonce used to detect duplicate
- * @return procedure Id n
+ * @return procedure Id
*/
long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible,
final long nonceGroup, final long nonce) throws IOException;
@@ -227,7 +227,7 @@ public interface MasterServices extends Server {
* @param splitRow split point
* @param nonceGroup used to detect duplicate
* @param nonce used to detect duplicate
- * @return procedure Id n
+ * @return procedure Id
*/
long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup,
final long nonce) throws IOException;
@@ -271,46 +271,46 @@ public interface MasterServices extends Server {
* Abort a procedure.
* @param procId ID of the procedure
* @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
- * @return true if aborted, false if procedure already completed or does not exist n
+ * @return true if aborted, false if procedure already completed or does not exist
*/
public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
throws IOException;
/**
* Get procedures
- * @return procedure list n
+ * @return procedure list
*/
public List<Procedure<?>> getProcedures() throws IOException;
/**
* Get locks
- * @return lock list n
+ * @return lock list
*/
public List<LockedResource> getLocks() throws IOException;
/**
* Get list of table descriptors by namespace
- * @param name namespace name nn
+ * @param name namespace name
*/
public List<TableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException;
/**
* Get list of table names by namespace
* @param name namespace name
- * @return table names n
+ * @return table names
*/
public List<TableName> listTableNamesByNamespace(String name) throws IOException;
/**
* @param table the table for which last successful major compaction time is queried
* @return the timestamp of the last successful major compaction for the passed table, or 0 if no
- * HFile resulting from a major compaction exists n
+ * HFile resulting from a major compaction exists
*/
public long getLastMajorCompactionTimestamp(TableName table) throws IOException;
/**
- * n * @return the timestamp of the last successful major compaction for the passed region or 0 if
- * no HFile resulting from a major compaction exists n
+ * Returns the timestamp of the last successful major compaction for the passed region or 0 if no
+ * HFile resulting from a major compaction exists
*/
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
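The table-lifecycle methods above share the (nonceGroup, nonce) pair for duplicate detection. A hedged sketch of a caller (the master reference, descriptors and nonce values are assumptions):
    // Sketch: each call returns a procedure id; nonces let retries be deduplicated.
    long nonceGroup = 1L;
    long procId = master.createTable(desc, splitKeys, nonceGroup, 1L);
    master.disableTable(tableName, nonceGroup, 2L);
    master.modifyTable(tableName, newDescriptor, nonceGroup, 3L);
    master.enableTable(tableName, nonceGroup, 4L);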
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
index eaf335e52ef..43407f447f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
@@ -55,7 +55,7 @@ public class MetricsAssignmentManager {
}
/**
- * set new value for number of regions in transition. n
+ * set new value for number of regions in transition.
*/
public void updateRITCount(final int ritCount) {
assignmentManagerSource.setRIT(ritCount);
@@ -63,21 +63,21 @@ public class MetricsAssignmentManager {
/**
* update RIT count that are in this state for more than the threshold as defined by the property
- * rit.metrics.threshold.time. n
+ * rit.metrics.threshold.time.
*/
public void updateRITCountOverThreshold(final int ritCountOverThreshold) {
assignmentManagerSource.setRITCountOverThreshold(ritCountOverThreshold);
}
/**
- * update the timestamp for oldest region in transition metrics. n
+ * update the timestamp for oldest region in transition metrics.
*/
public void updateRITOldestAge(final long timestamp) {
assignmentManagerSource.setRITOldestAge(timestamp);
}
/**
- * update the duration metrics of region is transition n
+ * update the duration metrics of region in transition
*/
public void updateRitDuration(long duration) {
assignmentManagerSource.updateRitDuration(duration);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 39d21e32032..854c21da2bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -174,10 +174,11 @@ public class RegionPlacementMaintainer implements Closeable {
}
/**
- * Generate the assignment plan for the existing table nnnn * @param
- * munkresForSecondaryAndTertiary if set on true the assignment plan for the tertiary and
- * secondary will be generated with Munkres algorithm, otherwise will be generated using
- * placeSecondaryAndTertiaryRS n
+ * Generate the assignment plan for the existing table
+ * @param munkresForSecondaryAndTertiary if set to true the assignment plan for the tertiary and
+ * secondary will be generated with Munkres algorithm,
+ * otherwise will be generated using
+ * placeSecondaryAndTertiaryRS
*/
private void genAssignmentPlan(TableName tableName,
SnapshotOfRegionAssignmentFromMeta assignmentSnapshot,
@@ -579,7 +580,7 @@ public class RegionPlacementMaintainer implements Closeable {
}
/**
- * Print the assignment plan to the system output stream n
+ * Print the assignment plan to the system output stream
*/
public static void printAssignmentPlan(FavoredNodesPlan plan) {
if (plan == null) return;
@@ -622,7 +623,7 @@ public class RegionPlacementMaintainer implements Closeable {
}
/**
- * Update the assignment plan to all the region servers nn
+ * Update the assignment plan to all the region servers
*/
private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
LOG.info("Start to update the region servers with the new assignment plan");
@@ -737,7 +738,7 @@ public class RegionPlacementMaintainer implements Closeable {
* as a string) also prints the baseline locality
* @param movesPerTable - how many primary regions will move per table
* @param regionLocalityMap - locality map from FS
- * @param newPlan - new assignment plan n
+ * @param newPlan - new assignment plan
*/
public void checkDifferencesWithOldPlan(Map<TableName, Integer> movesPerTable,
Map<String, Map<String, Float>> regionLocalityMap, FavoredNodesPlan newPlan)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 1bf3c73d59a..ed28db78de7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -353,8 +353,8 @@ public class ServerManager {
* Checks if the clock skew between the server and the master. If the clock skew exceeds the
* configured max, it will throw an exception; if it exceeds the configured warning threshold, it
* will log a warning but start normally.
- * @param serverName Incoming servers's name n * @throws ClockOutOfSyncException if the skew
- * exceeds the configured max value
+ * @param serverName Incoming server's name
+ * @throws ClockOutOfSyncException if the skew exceeds the configured max value
*/
private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
throws ClockOutOfSyncException {
@@ -448,9 +448,7 @@ public class ServerManager {
return builder.build();
}
- /**
- * n * @return ServerMetrics if serverName is known else null
- */
+ /** Returns ServerMetrics if serverName is known else null */
public ServerMetrics getLoad(final ServerName serverName) {
return this.onlineServers.get(serverName);
}
@@ -656,8 +654,8 @@ public class ServerManager {
}
/**
- * Add the server to the drain list. n * @return True if the server is added or the server is
- * already on the drain list.
+ * Add the server to the drain list.
+ * @return True if the server is added or the server is already on the drain list.
*/
public synchronized boolean addServerToDrainList(final ServerName sn) {
// Warn if the server (sn) is not online. ServerName is of the form:
@@ -744,7 +742,7 @@ public class ServerManager {
* the master is stopped - the 'hbase.master.wait.on.regionservers.maxtostart' number of region
* servers is reached - the 'hbase.master.wait.on.regionservers.mintostart' is reached AND there
* have been no new region server in for 'hbase.master.wait.on.regionservers.interval' time AND
- * the 'hbase.master.wait.on.regionservers.timeout' is reached n
+ * the 'hbase.master.wait.on.regionservers.timeout' is reached
*/
public void waitForRegionServers(MonitoredTask status) throws InterruptedException {
final long interval =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index b1f067bfd82..8c91c58dbbe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -110,7 +110,7 @@ public class SplitLogManager {
* Its OK to construct this object even when region-servers are not online. It does lookup the
* orphan tasks in coordination engine but it doesn't block waiting for them to be done.
* @param master the master services
- * @param conf the HBase configuration n
+ * @param conf the HBase configuration
*/
public SplitLogManager(MasterServices master, Configuration conf) throws IOException {
this.server = master;
@@ -180,7 +180,7 @@ public class SplitLogManager {
/**
* @param logDir one region sever wal dir path in .logs
* @throws IOException if there was an error while splitting any log file
- * @return cumulative size of the logfiles split n
+ * @return cumulative size of the logfiles split
*/
public long splitLogDistributed(final Path logDir) throws IOException {
List<Path> logDirs = new ArrayList<>();
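A hedged usage sketch of the entry point above (the WAL directory path is illustrative):
    // Sketch: split all log files under one region server's WAL directory.
    long bytesSplit = splitLogManager.splitLogDistributed(
      new Path("/hbase/WALs/rs1.example.com,16020,1600000000000-splitting"));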
@@ -377,9 +377,7 @@ public class SplitLogManager {
}
- /**
- * nn * @return null on success, existing task on error
- */
+ /** Returns null on success, existing task on error */
private Task createTaskIfAbsent(String path, TaskBatch batch) {
Task oldtask;
// batch.installed is only changed via this function and
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 1fe43aba7da..c0b47b0bc24 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -714,7 +714,7 @@ public class MergeTableRegionsProcedure
/**
* The procedure could be restarted from a different machine. If the variable is null, we need to
* retrieve it.
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
*/
private ServerName getServerName(final MasterProcedureEnv env) {
if (regionLocation == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 93e2cf0bc34..6394f1d6ce6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -385,7 +385,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure<C
/**
* Action before cloning from snapshot.
- * @param env MasterProcedureEnv nn
+ * @param env MasterProcedureEnv
*/
private void preCloneSnapshot(final MasterProcedureEnv env)
throws IOException, InterruptedException {
@@ -409,7 +409,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure<C
/**
* Action after cloning from snapshot.
- * @param env MasterProcedureEnv nn
+ * @param env MasterProcedureEnv
*/
private void postCloneSnapshot(final MasterProcedureEnv env)
throws IOException, InterruptedException {
@@ -423,7 +423,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure<C
/**
* Create regions in file system.
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
*/
private List<RegionInfo> createFilesystemLayout(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final List<RegionInfo> newRegions) throws IOException {
@@ -480,7 +480,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure<C
/**
* Create region layout in file system.
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
*/
private List<RegionInfo> createFsLayout(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, List<RegionInfo> newRegions,
@@ -509,7 +509,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure<C
/**
* Add regions to hbase:meta table.
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
*/
private void addRegionsToMeta(final MasterProcedureEnv env) throws IOException {
newRegions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, newRegions);
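
Seen from the public client API, the helpers above are the steps a single clone call fans out
into. A hedged sketch; the snapshot and table names are assumptions:

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits CloneSnapshotProcedure, which runs roughly: preCloneSnapshot ->
      // createFilesystemLayout -> addRegionsToMeta -> postCloneSnapshot.
      admin.cloneSnapshot("demo-snap", TableName.valueOf("demo-clone"));
    }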
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
index 3466bf68843..bfa2a52067b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -242,7 +242,7 @@ public class EnableTableProcedure extends AbstractStateMachineTableProcedure<Ena
* Action before any real action of enabling the table. Set the exception in the procedure instead
* of throwing it. This approach is to remain backward compatible with 1.0.
* @param env MasterProcedureEnv
- * @return whether the table passes the necessary checks n
+ * @return whether the table passes the necessary checks
*/
private boolean prepareEnable(final MasterProcedureEnv env) throws IOException {
boolean canTableBeEnabled = true;
@@ -278,7 +278,7 @@ public class EnableTableProcedure extends AbstractStateMachineTableProcedure<Ena
/**
* Action before enabling table.
* @param env MasterProcedureEnv
- * @param state the procedure state nn
+ * @param state the procedure state
*/
private void preEnable(final MasterProcedureEnv env, final EnableTableState state)
throws IOException, InterruptedException {
@@ -288,7 +288,7 @@ public class EnableTableProcedure extends AbstractStateMachineTableProcedure<Ena
/**
* Mark table state to Enabling
* @param env MasterProcedureEnv
- * @param tableName the target table n
+ * @param tableName the target table
*/
protected static void setTableStateToEnabling(final MasterProcedureEnv env,
final TableName tableName) throws IOException {
@@ -300,7 +300,7 @@ public class EnableTableProcedure extends AbstractStateMachineTableProcedure<Ena
/**
* Mark table state to Enabled
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
*/
protected static void setTableStateToEnabled(final MasterProcedureEnv env,
final TableName tableName) throws IOException {
@@ -313,7 +313,7 @@ public class EnableTableProcedure extends AbstractStateMachineTableProcedure<Ena
/**
* Action after enabling table.
* @param env MasterProcedureEnv
- * @param state the procedure state nn
+ * @param state the procedure state
*/
private void postEnable(final MasterProcedureEnv env, final EnableTableState state)
throws IOException, InterruptedException {
@@ -323,7 +323,7 @@ public class EnableTableProcedure extends AbstractStateMachineTableProcedure<Ena
/**
* Coprocessor Action.
* @param env MasterProcedureEnv
- * @param state the procedure state nn
+ * @param state the procedure state
*/
private void runCoprocessorAction(final MasterProcedureEnv env, final EnableTableState state)
throws IOException, InterruptedException {
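
For context, the client-side trigger of this state machine; the table name is an assumption and
the step ordering paraphrases the javadoc above:

    // Admin.enableTable() submits EnableTableProcedure, which walks roughly:
    // prepareEnable -> preEnable -> setTableStateToEnabling -> assign regions
    // -> setTableStateToEnabled -> postEnable.
    admin.enableTable(TableName.valueOf("demo"));
    boolean nowEnabled = admin.isTableEnabled(TableName.valueOf("demo"));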
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
index 946e4eb7a8e..5be0ae5a3fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
@@ -96,7 +96,7 @@ public class RestoreSnapshotProcedure
* Constructor
* @param env MasterProcedureEnv
* @param tableDescriptor the table to operate on
- * @param snapshot snapshot to restore from n
+ * @param snapshot snapshot to restore from
*/
public RestoreSnapshotProcedure(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final SnapshotDescription snapshot,
@@ -320,7 +320,7 @@ public class RestoreSnapshotProcedure
/**
* Action before any real action of restoring from snapshot.
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
*/
private void prepareRestore(final MasterProcedureEnv env) throws IOException {
final TableName tableName = getTableName();
@@ -357,7 +357,7 @@ public class RestoreSnapshotProcedure
/**
* Update descriptor
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
**/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor);
@@ -365,7 +365,7 @@ public class RestoreSnapshotProcedure
/**
* Execute the on-disk Restore
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
**/
private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem();
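
The matching client entry point; restoring requires the table to be disabled first (snapshot
and table names are assumptions):

    admin.disableTable(TableName.valueOf("demo"));
    // Submits RestoreSnapshotProcedure: roughly prepareRestore ->
    // updateTableDescriptor -> restoreSnapshot (the on-disk step above).
    admin.restoreSnapshot("demo-snap");
    admin.enableTable(TableName.valueOf("demo"));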
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index e9d1830b33d..2b1e6a31f92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -349,9 +349,9 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
}
/**
- * Delete the specified snapshot n * @throws SnapshotDoesNotExistException If the specified
- * snapshot does not exist.
- * @throws IOException For filesystem IOExceptions
+ * Delete the specified snapshot
+ * @throws SnapshotDoesNotExistException If the specified snapshot does not exist.
+ * @throws IOException For filesystem IOExceptions
*/
public void deleteSnapshot(SnapshotDescription snapshot) throws IOException {
// check to see if it is completed
@@ -389,8 +389,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
}
/**
- * Check if the specified snapshot is done n * @return true if snapshot is ready to be restored,
- * false if it is still being taken.
+ * Check if the specified snapshot is done
+ * @return true if snapshot is ready to be restored, false if it is still being taken.
* @throws IOException if error from HDFS or RPC
* @throws UnknownSnapshotException if snapshot is invalid or does not exist.
*/
@@ -657,9 +657,9 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
}
/**
- * Take a snapshot based on the enabled/disabled state of the table. n * @throws
- * HBaseSnapshotException when a snapshot specific exception occurs.
- * @throws IOException when some sort of generic IO exception occurs.
+ * Take a snapshot based on the enabled/disabled state of the table.
+ * @throws HBaseSnapshotException when a snapshot specific exception occurs.
+ * @throws IOException when some sort of generic IO exception occurs.
*/
public void takeSnapshot(SnapshotDescription snapshot) throws IOException {
this.takingSnapshotLock.readLock().lock();
@@ -812,8 +812,9 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
/**
* Set the handler for the current snapshot
* <p>
- * Exposed for TESTING n * @param handler handler the master should use TODO get rid of this if
- * possible, repackaging, modify tests.
+ * Exposed for TESTING
+ * @param handler the handler the master should use. TODO: get rid of this if possible;
+ * repackage and modify tests.
*/
public synchronized void setSnapshotHandlerForTesting(final TableName tableName,
final SnapshotSentinel handler) {
@@ -857,7 +858,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @param snapshot Snapshot Descriptor
* @param snapshotTableDesc Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
- * @return procId the ID of the clone snapshot procedure n
+ * @return procId the ID of the clone snapshot procedure
*/
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
@@ -924,8 +925,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
}
/**
- * Restore or Clone the specified snapshot n * @param nonceKey unique identifier to prevent
- * duplicated RPC n
+ * Restore or Clone the specified snapshot
+ * @param nonceKey unique identifier to prevent duplicated RPC
*/
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey,
final boolean restoreAcl, String customSFT) throws IOException {
@@ -977,7 +978,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @param snapshotTableDesc Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
* @param restoreAcl true to restore acl of snapshot
- * @return procId the ID of the restore snapshot procedure n
+ * @return procId the ID of the restore snapshot procedure
*/
private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
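
A compact round trip over the manager methods touched above, via the public Admin API. A hedged
sketch; every name below is an assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class SnapshotRoundTrip {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("demo");
          admin.snapshot("demo-snap", table); // blocks; SnapshotManager.takeSnapshot underneath
          admin.deleteSnapshot("demo-snap");  // SnapshotManager.deleteSnapshot underneath
        }
      }
    }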
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index 6df17e58e22..e7b0f826082 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -168,7 +168,7 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher {
* @param writer The store file writer.
* @param status Task that represents the flush operation and may be updated with
* status.
- * @param throughputController A controller to avoid flush too fast. n
+ * @param throughputController A controller to avoid flushing too fast.
*/
protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId,
InternalScanner scanner, StoreFileWriter writer, MonitoredTask status,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
index 755f7bb4c3f..3293208771a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
@@ -49,7 +49,7 @@ public class MobFile {
/**
* Internal use only. This is used by the sweeper.
- * @return The store file scanner. n
+ * @return The store file scanner.
*/
public StoreFileScanner getScanner() throws IOException {
List<HStoreFile> sfs = new ArrayList<>();
@@ -64,7 +64,7 @@ public class MobFile {
* Reads a cell from the mob file.
* @param search The cell to be searched for in the mob file.
* @param cacheMobBlocks Should this scanner cache blocks.
- * @return The cell in the mob file. n
+ * @return The cell in the mob file.
*/
public MobCell readCell(Cell search, boolean cacheMobBlocks) throws IOException {
return readCell(search, cacheMobBlocks, sf.getMaxMemStoreTS());
@@ -75,7 +75,7 @@ public class MobFile {
* @param search The cell to be searched for in the mob file.
* @param cacheMobBlocks Should this scanner cache blocks.
* @param readPt the read point.
- * @return The cell in the mob file. n
+ * @return The cell in the mob file.
*/
public MobCell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException {
StoreFileScanner scanner = null;
@@ -108,7 +108,7 @@ public class MobFile {
}
/**
- * Opens the underlying reader. It's not thread-safe. Use MobFileCache.openFile() instead. n
+ * Opens the underlying reader. It's not thread-safe. Use MobFileCache.openFile() instead.
*/
public void open() throws IOException {
sf.initReader();
@@ -116,7 +116,7 @@ public class MobFile {
/**
* Closes the underlying reader, but does not evict blocks belonging to this file. It's not
- * thread-safe. Use MobFileCache.closeFile() instead. n
+ * thread-safe. Use MobFileCache.closeFile() instead.
*/
public void close() throws IOException {
if (sf != null) {
@@ -131,7 +131,7 @@ public class MobFile {
* @param path The path of the underlying StoreFile.
* @param conf The configuration.
* @param cacheConf The CacheConfig.
- * @return An instance of the MobFile. n
+ * @return An instance of the MobFile.
*/
public static MobFile create(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf)
throws IOException {
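
A hedged read-path sketch for the API documented above; the mob file path is an assumption
(real callers derive it from the mob reference tag), and imports and handles are abbreviated:

    FileSystem fs = FileSystem.get(conf);
    MobFile mobFile = MobFile.create(fs, new Path("/hbase/mobdir/assumed-mob-file"),
      conf, new CacheConfig(conf));
    mobFile.open(); // per the javadoc above, production code uses MobFileCache.openFile()
    try {
      Cell search = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(Bytes.toBytes("row1")).setFamily(Bytes.toBytes("f"))
        .setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put).build();
      try (MobCell mobCell = mobFile.readCell(search, /* cacheMobBlocks= */ false)) {
        // mobCell (possibly null) carries the mob value matching "search".
      }
    } finally {
      mobFile.close(); // closes the reader without evicting this file's cached blocks
    }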
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java
index dc2bf5c14e3..ed1803cb38d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java
@@ -195,7 +195,7 @@ public class MobFileCache {
* @param fs The current file system.
* @param path The file path.
* @param cacheConf The current MobCacheConfig
- * @return A opened mob file. n
+ * @return An opened mob file.
*/
public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
if (!isCacheEnabled) {
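
And the cache-mediated variant the MobFile javadoc recommends, under the same assumptions as
the previous sketch:

    MobFileCache cache = new MobFileCache(conf); // normally one shared per region server
    MobFile cached = cache.openFile(fs, mobPath, new CacheConfig(conf));
    try {
      // read via cached.readCell(...) exactly as in the previous sketch
    } finally {
      cache.closeFile(cached); // hands the file back to the cache rather than closing it
    }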
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java
index d65d96cc64c..8f163cf5cc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java
@@ -51,8 +51,8 @@ public final class MobFileName {
public static final String REGION_SEP = "_";
/**
- * n * The start key. n * The string of the latest timestamp of cells in this file, the format is
- * yyyymmdd. n * The uuid
+ * Constructor taking the start key, the latest timestamp of cells in this file (a string in
+ * yyyymmdd format), and the uuid.
* @param regionName name of a region, where this file was created during flush or compaction.
*/
private MobFileName(byte[] startKey, String date, String uuid, String regionName) {
@@ -64,8 +64,8 @@ public final class MobFileName {
}
/**
- * n * The md5 hex string of the start key. n * The string of the latest timestamp of cells in
- * this file, the format is yyyymmdd. n * The uuid
+ * Constructor taking the md5 hex string of the start key, the latest timestamp of cells in this
+ * file (a string in yyyymmdd format), and the uuid.
* @param regionName name of a region, where this file was created during flush or compaction.
*/
private MobFileName(String startKey, String date, String uuid, String regionName) {
@@ -77,8 +77,8 @@ public final class MobFileName {
}
/**
- * Creates an instance of MobFileName n * The md5 hex string of the start key. n * The string of
- * the latest timestamp of cells in this file, the format is yyyymmdd.
+ * Creates an instance of MobFileName from the md5 hex string of the start key and the latest
+ * timestamp of cells in this file (a string in yyyymmdd format).
* @param uuid The uuid.
* @param regionName name of a region, where this file was created during flush or compaction.
* @return An instance of a MobFileName.
@@ -88,8 +88,8 @@ public final class MobFileName {
}
/**
- * Creates an instance of MobFileName n * The md5 hex string of the start key. n * The string of
- * the latest timestamp of cells in this file, the format is yyyymmdd.
+ * Creates an instance of MobFileName from the md5 hex string of the start key and the latest
+ * timestamp of cells in this file (a string in yyyymmdd format).
* @param uuid The uuid.
* @param regionName name of a region, where this file was created during flush or compaction.
* @return An instance of a MobFileName.
@@ -120,7 +120,7 @@ public final class MobFileName {
/**
* get startKey from MobFileName.
- * @param fileName file name. n
+ * @param fileName file name.
*/
public static String getStartKeyFromName(final String fileName) {
return fileName.substring(0, STARTKEY_END_INDEX);
@@ -128,7 +128,7 @@ public final class MobFileName {
/**
* get date from MobFileName.
- * @param fileName file name. n
+ * @param fileName file name.
*/
public static String getDateFromName(final String fileName) {
return fileName.substring(STARTKEY_END_INDEX, DATE_END_INDEX);
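
The two accessors above slice fixed-width fields out of the file name. A sketch with a
fabricated name (the 32/8-character widths are assumptions matching md5-hex and yyyymmdd):

    String name = "d41d8cd98f00b204e9800998ecf8427e"  // 32 hex chars: md5 of the start key
      + "20221006"                                    // 8 chars: yyyymmdd date
      + "c157a79031e1c40f85931829bc5fc552";           // uuid
    String startKeyMd5 = MobFileName.getStartKeyFromName(name); // first field
    String date = MobFileName.getDateFromName(name);            // second field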
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 43cf4255235..e04d67a0aaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -435,7 +435,8 @@ public final class MobUtils {
/**
* Gets the RegionInfo of the mob files. This is a dummy region. The mob files are not saved in a
- * region in HBase. It's internally used only. n * @return A dummy mob region info.
+ * region in HBase. It's used internally only.
+ * @return A dummy mob region info.
*/
public static RegionInfo getMobRegionInfo(TableName tableName) {
return RegionInfoBuilder.newBuilder(tableName).setStartKey(MobConstants.MOB_REGION_NAME_BYTES)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java
index c08a462ed0b..321fb7e5c21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java
@@ -44,7 +44,7 @@ public abstract class ThreadMonitoring {
}
/**
- * Print all of the thread's information and stack traces. nnn
+ * Print all of the thread's information and stack traces.
*/
public static void appendThreadInfo(StringBuilder sb, ThreadInfo info, String indent) {
boolean contention = threadBean.isThreadContentionMonitoringEnabled();
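
A hedged usage sketch for the helper above (the 100-frame stack depth is an arbitrary
assumption):

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    ThreadMXBean bean = ManagementFactory.getThreadMXBean();
    StringBuilder sb = new StringBuilder();
    for (ThreadInfo info : bean.getThreadInfo(bean.getAllThreadIds(), 100)) {
      if (info != null) { // threads can exit between the two bean calls
        ThreadMonitoring.appendThreadInfo(sb, info, "  ");
      }
    }
    System.out.println(sb);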
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
index e54ea3febde..d95378f9b86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
@@ -70,8 +70,8 @@ class NamespaceStateManager {
}
/**
- * Check if adding a region violates namespace quota, if not update namespace cache. nnn * @return
- * true, if region can be added to table.
+ * Check if adding a region violates the namespace quota; if not, update the namespace cache.
+ * @return true, if region can be added to table.
* @throws IOException Signals that an I/O exception has occurred.
*/
synchronized boolean checkAndUpdateNamespaceRegionCount(TableName name, byte[] regionName,
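
The quota consulted above is configured on the namespace descriptor. A sketch using the
standard namespace quota properties (namespace name and limits are assumptions):

    admin.createNamespace(NamespaceDescriptor.create("ns_demo")
      .addConfiguration("hbase.namespace.quota.maxregions", "10")
      .addConfiguration("hbase.namespace.quota.maxtables", "5")
      .build());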
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
index 91bceacae09..3fe09e848d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
@@ -53,14 +53,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDe
public abstract class MasterProcedureManager extends ProcedureManager implements Stoppable {
/**
* Initialize a globally barriered procedure for master.
- * @param master Master service interface nnn
+ * @param master Master service interface
*/
public abstract void initialize(MasterServices master, MetricsMaster metricsMaster)
throws KeeperException, IOException, UnsupportedOperationException;
/**
* Execute a distributed procedure on cluster
- * @param desc Procedure description n
+ * @param desc Procedure description
*/
public void execProcedure(ProcedureDescription desc) throws IOException {
}
@@ -68,7 +68,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements
/**
* Execute a distributed procedure on cluster with return data.
* @param desc Procedure description
- * @return data returned from the procedure execution, null if no data n
+ * @return data returned from the procedure execution, null if no data
*/
public byte[] execProcedureWithRet(ProcedureDescription desc) throws IOException {
return null;
@@ -84,7 +84,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements
/**
* Check if the procedure is finished successfully
* @param desc Procedure description
- * @return true if the specified procedure is finished successfully n
+ * @return true if the specified procedure is finished successfully
*/
public abstract boolean isProcedureDone(ProcedureDescription desc) throws IOException;
}
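
Client code reaches a registered MasterProcedureManager through Admin by signature. A hedged
sketch using the built-in flush manager ("flush-table-proc"); the table name is an assumption:

    Map<String, String> props = new HashMap<>();
    admin.execProcedure("flush-table-proc", "demo", props);         // execProcedure above
    boolean done =
      admin.isProcedureFinished("flush-table-proc", "demo", props); // isProcedureDone above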
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
index 8eb477fb846..d823fac1aa3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
@@ -234,7 +234,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
/**
* Sends a message to Members to create a new {@link Subprocedure} for this Procedure and execute
- * the {@link Subprocedure#acquireBarrier} step. n
+ * the {@link Subprocedure#acquireBarrier} step.
*/
public void sendGlobalBarrierStart() throws ForeignException {
// start the procedure
@@ -255,7 +255,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
* Sends a message to all members that the global barrier condition has been satisfied. This
* should only be executed after all members have completed its
* {@link Subprocedure#acquireBarrier()} call successfully. This triggers the member
- * {@link Subprocedure#insideBarrier} method. n
+ * {@link Subprocedure#insideBarrier} method.
*/
public void sendGlobalBarrierReached() throws ForeignException {
try {
@@ -285,7 +285,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
//
/**
- * Call back triggered by an individual member upon successful local barrier acquisition n
+ * Call back triggered by an individual member upon successful local barrier acquisition
*/
public void barrierAcquiredByMember(String member) {
LOG.debug("member: '" + member + "' joining acquired barrier for procedure '" + procName
@@ -307,7 +307,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
/**
* Call back triggered by an individual member upon successful local in-barrier execution and
- * release nn
+ * release
*/
public void barrierReleasedByMember(String member, byte[] dataFromMember) {
boolean removed = false;
@@ -329,7 +329,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
/**
* Waits until the entire procedure has globally completed, or has been aborted. If an exception
- * is thrown the procedure may or not have run cleanup to trigger the completion latch yet. nn
+ * is thrown the procedure may or may not have run cleanup to trigger the completion latch yet.
*/
public void waitForCompleted() throws ForeignException, InterruptedException {
waitForLatch(completedLatch, monitor, wakeFrequency, procName + " completed");
@@ -338,7 +338,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
/**
* Waits until the entire procedure has globally completed, or has been aborted. If an exception
* is thrown the procedure may or may not have run cleanup to trigger the completion latch yet.
- * @return data returned from procedure members upon successfully completing subprocedure. nn
+ * @return data returned from procedure members upon successfully completing subprocedure.
*/
public HashMap<String, byte[]> waitForCompletedWithRet()
throws ForeignException, InterruptedException {
@@ -347,7 +347,7 @@ public class Procedure implements Callable<Void>, ForeignExceptionListener {
}
/**
- * Check if the entire procedure has globally completed, or has been aborted. n
+ * Check if the entire procedure has globally completed, or has been aborted.
*/
public boolean isCompleted() throws ForeignException {
// Rethrow exception if any
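
Pulled together, the coordinator-side shape of the two-phase barrier these methods document;
how "proc" is obtained from a ProcedureCoordinator and all error handling are elided:

    proc.sendGlobalBarrierStart();    // members run Subprocedure.acquireBarrier()
    proc.sendGlobalBarrierReached();  // members run Subprocedure.insideBarrier()
    HashMap<String, byte[]> memberData = proc.waitForCompletedWithRet(); // block until done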
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
index e02776ecb69..2d654691543 100644
... 7478 lines suppressed ...