Posted to commits@hbase.apache.org by zh...@apache.org on 2022/10/06 16:43:34 UTC

[hbase] branch branch-2.4 updated: HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
     new 8c2dd12adbc HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)
8c2dd12adbc is described below

commit 8c2dd12adbc3b30c079554c6073663d0649d055d
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Oct 6 18:17:34 2022 +0800

    HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    (cherry picked from commit 63cdd026f08cdde6ac0fde1342ffd050e8e02441)
    
    Conflicts:
            hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
            hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
            hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
            hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
            hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
            hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
            hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
            hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java
            hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
---
 .../FanOutOneBlockAsyncDFSOutputSaslHelper.java    |   2 +-
 .../hadoop/hbase/AsyncMetaTableAccessor.java       |  34 +++--
 .../java/org/apache/hadoop/hbase/ClusterId.java    |   7 +-
 .../org/apache/hadoop/hbase/ClusterStatus.java     |   2 +-
 .../org/apache/hadoop/hbase/HColumnDescriptor.java |  10 +-
 .../java/org/apache/hadoop/hbase/HRegionInfo.java  |  96 +++++++-----
 .../org/apache/hadoop/hbase/HRegionLocation.java   |   6 +-
 .../org/apache/hadoop/hbase/HTableDescriptor.java  |  12 +-
 .../org/apache/hadoop/hbase/MetaTableAccessor.java |   2 +-
 .../hbase/NotAllMetaRegionsOnlineException.java    |   3 +-
 .../java/org/apache/hadoop/hbase/ServerLoad.java   |   2 +-
 .../org/apache/hadoop/hbase/ServerMetrics.java     |   2 +-
 .../java/org/apache/hadoop/hbase/client/Admin.java |  27 ++--
 .../org/apache/hadoop/hbase/client/Append.java     |  19 +--
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |  23 +--
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |   2 +-
 .../apache/hadoop/hbase/client/AsyncProcess.java   |   2 +-
 .../apache/hadoop/hbase/client/ClientScanner.java  |   2 +-
 .../hadoop/hbase/client/ClusterConnection.java     |   6 +-
 .../hbase/client/ColumnFamilyDescriptor.java       |   2 +-
 .../client/ColumnFamilyDescriptorBuilder.java      |  17 ++-
 .../hbase/client/ConnectionImplementation.java     |   2 +-
 .../org/apache/hadoop/hbase/client/Delete.java     |  13 +-
 .../java/org/apache/hadoop/hbase/client/Get.java   |  19 +--
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |   6 +-
 .../hadoop/hbase/client/HTableMultiplexer.java     |   7 +-
 .../hbase/client/ImmutableHColumnDescriptor.java   |   2 +-
 .../hadoop/hbase/client/ImmutableHRegionInfo.java  |   2 +-
 .../hbase/client/ImmutableHTableDescriptor.java    |   2 +-
 .../org/apache/hadoop/hbase/client/Increment.java  |  10 +-
 .../apache/hadoop/hbase/client/MasterCallable.java |   2 +-
 .../org/apache/hadoop/hbase/client/MetaCache.java  |   6 +-
 .../apache/hadoop/hbase/client/MultiAction.java    |   5 +-
 .../apache/hadoop/hbase/client/MultiResponse.java  |   6 +-
 .../hadoop/hbase/client/MutableRegionInfo.java     |   7 +-
 .../org/apache/hadoop/hbase/client/Mutation.java   |  46 +++---
 .../org/apache/hadoop/hbase/client/Operation.java  |   2 +-
 .../hbase/client/OperationWithAttributes.java      |   2 +-
 .../client/PreemptiveFastFailInterceptor.java      |   9 +-
 .../java/org/apache/hadoop/hbase/client/Put.java   |  13 +-
 .../java/org/apache/hadoop/hbase/client/Query.java |   9 +-
 .../hbase/client/RegionAdminServiceCallable.java   |   2 +-
 .../hadoop/hbase/client/RegionInfoDisplay.java     |  10 +-
 .../hadoop/hbase/client/RegionReplicaUtil.java     |   5 +-
 .../hadoop/hbase/client/RegionServerCallable.java  |   2 +-
 .../org/apache/hadoop/hbase/client/Result.java     |  17 ++-
 .../apache/hadoop/hbase/client/ResultScanner.java  |   2 +-
 .../hbase/client/RetryingCallerInterceptor.java    |   8 +-
 .../client/RetryingCallerInterceptorContext.java   |  10 +-
 .../apache/hadoop/hbase/client/RowMutations.java   |   4 +-
 .../java/org/apache/hadoop/hbase/client/Scan.java  | 105 +++++++------
 .../hadoop/hbase/client/SecureBulkLoadClient.java  |   8 +-
 .../java/org/apache/hadoop/hbase/client/Table.java |  19 +--
 .../hadoop/hbase/client/TableDescriptor.java       |   2 +-
 .../hbase/client/TableDescriptorBuilder.java       |  13 +-
 .../org/apache/hadoop/hbase/client/TableState.java |  10 +-
 .../hbase/client/UnmodifyableHRegionInfo.java      |   2 +-
 .../hbase/client/backoff/ServerStatistics.java     |   2 +-
 .../client/metrics/ServerSideScanMetrics.java      |  18 +--
 .../hbase/client/replication/ReplicationAdmin.java |  17 ++-
 .../hbase/coprocessor/ColumnInterpreter.java       |  43 +++---
 .../hbase/coprocessor/CoprocessorException.java    |   2 +-
 .../hbase/exceptions/ClientExceptionsUtil.java     |   2 +-
 .../exceptions/FailedSanityCheckException.java     |   6 +-
 .../hadoop/hbase/filter/BinaryComparator.java      |   8 +-
 .../hbase/filter/BinaryPrefixComparator.java       |   8 +-
 .../apache/hadoop/hbase/filter/BitComparator.java  |   7 +-
 .../hbase/filter/ColumnPaginationFilter.java       |  13 +-
 .../hadoop/hbase/filter/ColumnRangeFilter.java     |   4 +-
 .../hadoop/hbase/filter/ColumnValueFilter.java     |   4 +-
 .../apache/hadoop/hbase/filter/CompareFilter.java  |   7 +-
 .../hadoop/hbase/filter/DependentColumnFilter.java |   8 +-
 .../apache/hadoop/hbase/filter/FamilyFilter.java   |   3 +-
 .../org/apache/hadoop/hbase/filter/Filter.java     |   9 +-
 .../org/apache/hadoop/hbase/filter/FilterBase.java |   6 +-
 .../org/apache/hadoop/hbase/filter/FilterList.java |  13 +-
 .../apache/hadoop/hbase/filter/FilterListBase.java |   2 +-
 .../FirstKeyValueMatchingQualifiersFilter.java     |   3 +-
 .../apache/hadoop/hbase/filter/FuzzyRowFilter.java |  11 +-
 .../hadoop/hbase/filter/InclusiveStopFilter.java   |   4 +-
 .../apache/hadoop/hbase/filter/KeyOnlyFilter.java  |   3 +-
 .../apache/hadoop/hbase/filter/LongComparator.java |   4 +-
 .../hbase/filter/MultipleColumnPrefixFilter.java   |   4 +-
 .../apache/hadoop/hbase/filter/NullComparator.java |   8 +-
 .../org/apache/hadoop/hbase/filter/PageFilter.java |   3 +-
 .../hadoop/hbase/filter/RandomRowFilter.java       |   8 +-
 .../hadoop/hbase/filter/RegexStringComparator.java |   8 +-
 .../org/apache/hadoop/hbase/filter/RowFilter.java  |   3 +-
 .../filter/SingleColumnValueExcludeFilter.java     |  12 +-
 .../hbase/filter/SingleColumnValueFilter.java      |  13 +-
 .../org/apache/hadoop/hbase/filter/SkipFilter.java |   3 +-
 .../hadoop/hbase/filter/SubstringComparator.java   |   8 +-
 .../hadoop/hbase/filter/TimestampsFilter.java      |   2 +-
 .../apache/hadoop/hbase/filter/ValueFilter.java    |   3 +-
 .../apache/hadoop/hbase/ipc/CellBlockBuilder.java  |   7 +-
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |  60 ++++----
 .../hadoop/hbase/regionserver/LeaseException.java  |   3 +-
 .../regionserver/wal/FailedLogCloseException.java  |   3 +-
 .../wal/FailedSyncBeforeLogCloseException.java     |   3 +-
 .../hbase/security/AbstractHBaseSaslRpcClient.java |   4 +-
 .../hadoop/hbase/security/EncryptionUtil.java      |   4 +-
 .../hadoop/hbase/security/HBaseSaslRpcClient.java  |   6 +-
 .../hbase/security/access/AccessControlClient.java |  37 ++---
 .../hbase/security/access/AccessControlUtil.java   |  39 +++--
 .../security/visibility/VisibilityClient.java      |  44 +++---
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 144 +++++++++---------
 .../hbase/shaded/protobuf/RequestConverter.java    | 108 +++++++++-----
 .../hbase/shaded/protobuf/ResponseConverter.java   |  22 +--
 .../org/apache/hadoop/hbase/util/Writables.java    |   4 +-
 .../apache/hadoop/hbase/zookeeper/ZNodePaths.java  |   4 +-
 .../hadoop/hbase/client/TestClientNoCluster.java   |  26 ++--
 .../hadoop/hbase/client/TestDeleteTimeStamp.java   |   2 +-
 .../hadoop/hbase/client/TestSnapshotFromAdmin.java |   2 +-
 .../java/org/apache/hadoop/hbase/AuthUtil.java     |   6 +-
 .../hadoop/hbase/ByteBufferKeyOnlyKeyValue.java    |   2 +-
 .../java/org/apache/hadoop/hbase/CellBuilder.java  |   2 +-
 .../apache/hadoop/hbase/CellComparatorImpl.java    |   5 +-
 .../java/org/apache/hadoop/hbase/CellUtil.java     | 115 +++++++-------
 .../apache/hadoop/hbase/CompoundConfiguration.java |   2 +-
 .../java/org/apache/hadoop/hbase/ExtendedCell.java |   2 +-
 .../apache/hadoop/hbase/HBaseConfiguration.java    |   2 +-
 .../java/org/apache/hadoop/hbase/KeyValue.java     | 114 +++++++-------
 .../org/apache/hadoop/hbase/KeyValueTestUtil.java  |   4 +-
 .../java/org/apache/hadoop/hbase/KeyValueUtil.java |  63 ++++----
 .../org/apache/hadoop/hbase/PrivateCellUtil.java   |  92 +++++++-----
 .../src/main/java/org/apache/hadoop/hbase/Tag.java |   2 +-
 .../org/apache/hadoop/hbase/codec/BaseDecoder.java |   2 +-
 .../org/apache/hadoop/hbase/codec/CellCodec.java   |   2 +-
 .../hadoop/hbase/codec/CellCodecWithTags.java      |   2 +-
 .../hadoop/hbase/filter/ByteArrayComparable.java   |   8 +-
 .../hadoop/hbase/io/ByteBufferOutputStream.java    |   2 +-
 .../hbase/io/ByteBufferWriterOutputStream.java     |   4 +-
 .../apache/hadoop/hbase/io/CellOutputStream.java   |   4 +-
 .../hadoop/hbase/io/ImmutableBytesWritable.java    |  10 +-
 .../hadoop/hbase/io/TagCompressionContext.java     |   8 +-
 .../org/apache/hadoop/hbase/io/crypto/Cipher.java  |   8 +-
 .../apache/hadoop/hbase/io/crypto/Decryptor.java   |   6 +-
 .../apache/hadoop/hbase/io/crypto/Encryption.java  |  22 +--
 .../apache/hadoop/hbase/io/crypto/Encryptor.java   |   6 +-
 .../apache/hadoop/hbase/io/crypto/KeyProvider.java |   6 +-
 .../hadoop/hbase/io/encoding/DataBlockEncoder.java |   9 +-
 .../hbase/io/encoding/DataBlockEncoding.java       |   2 +-
 .../hadoop/hbase/io/encoding/EncodedDataBlock.java |   4 +-
 .../io/encoding/HFileBlockDecodingContext.java     |   6 +-
 .../io/hadoopbackport/ThrottledInputStream.java    |   3 +-
 .../apache/hadoop/hbase/io/util/Dictionary.java    |   4 +-
 .../apache/hadoop/hbase/io/util/StreamUtils.java   |   4 +-
 .../java/org/apache/hadoop/hbase/nio/ByteBuff.java |  61 ++++----
 .../org/apache/hadoop/hbase/nio/MultiByteBuff.java |  47 +++---
 .../org/apache/hadoop/hbase/security/User.java     |   5 +-
 .../apache/hadoop/hbase/security/UserProvider.java |   2 +-
 .../hbase/util/AbstractPositionedByteRange.java    |   4 +-
 .../apache/hadoop/hbase/util/ByteBufferUtils.java  |  34 +++--
 .../org/apache/hadoop/hbase/util/ByteRange.java    |  11 +-
 .../java/org/apache/hadoop/hbase/util/Bytes.java   |  31 ++--
 .../org/apache/hadoop/hbase/util/ChecksumType.java |   8 +-
 .../java/org/apache/hadoop/hbase/util/Classes.java |   7 +-
 .../apache/hadoop/hbase/util/CommonFSUtils.java    |   2 +-
 .../hadoop/hbase/util/CoprocessorClassLoader.java  |   2 +-
 .../java/org/apache/hadoop/hbase/util/HashKey.java |   4 +-
 .../org/apache/hadoop/hbase/util/KeyLocker.java    |   2 +-
 .../java/org/apache/hadoop/hbase/util/MD5Hash.java |   8 +-
 .../java/org/apache/hadoop/hbase/util/Pair.java    |   4 +-
 .../apache/hadoop/hbase/util/PairOfSameType.java   |   4 +-
 .../hadoop/hbase/util/PositionedByteRange.java     |   4 +-
 .../apache/hadoop/hbase/util/PrettyPrinter.java    |   7 +-
 .../hadoop/hbase/util/SimpleMutableByteRange.java  |   8 +-
 .../util/SimplePositionedMutableByteRange.java     |  14 +-
 .../java/org/apache/hadoop/hbase/util/Threads.java |   2 +-
 .../apache/hadoop/hbase/util/TimeMeasurable.java   |   2 +-
 .../org/apache/hadoop/hbase/util/UnsafeAccess.java |   4 +-
 .../hadoop/hbase/util/WindowMovingAverage.java     |   2 +-
 .../apache/hadoop/hbase/zookeeper/ZKConfig.java    |   8 +-
 .../hadoop/hbase/TestHBaseConfiguration.java       |  10 +-
 .../regionserver/MetricsRegionServerSource.java    |   2 +-
 .../hadoop/hbase/rest/MetricsRESTSource.java       |   2 +-
 .../hbase/thrift/MetricsThriftServerSource.java    |   2 +-
 .../apache/hadoop/metrics2/MetricHistogram.java    |   2 +-
 .../metrics2/util/MetricSampleQuantiles.java       |   4 +-
 .../org/apache/hadoop/hbase/http/HttpServer.java   |   8 +-
 .../hbase/http/ProxyUserAuthenticationFilter.java  |   2 +-
 .../hadoop/hbase/http/jmx/JMXJsonServlet.java      |   4 +-
 .../apache/hadoop/hbase/util/JSONMetricUtil.java   |   2 +-
 .../hadoop/hbase/http/TestHttpServerLifecycle.java |   2 +-
 .../hadoop/hbase/DistributedHBaseCluster.java      |   2 +-
 .../hbase/mapreduce/IntegrationTestBulkLoad.java   |   2 +-
 .../hbase/test/IntegrationTestBigLinkedList.java   |  18 +--
 .../hbase/test/IntegrationTestReplication.java     |   6 +-
 .../org/apache/hadoop/hbase/mapred/Driver.java     |   3 -
 .../hadoop/hbase/mapred/GroupingTableMap.java      |  10 +-
 .../hadoop/hbase/mapred/IdentityTableMap.java      |   2 +-
 .../hadoop/hbase/mapred/IdentityTableReduce.java   |   2 +-
 .../mapred/MultiTableSnapshotInputFormat.java      |   1 -
 .../org/apache/hadoop/hbase/mapred/RowCounter.java |   7 +-
 .../hadoop/hbase/mapred/TableInputFormatBase.java  |   8 +-
 .../hadoop/hbase/mapred/TableOutputFormat.java     |   3 +-
 .../hadoop/hbase/mapred/TableRecordReader.java     |  14 +-
 .../hadoop/hbase/mapred/TableRecordReaderImpl.java |   2 -
 .../org/apache/hadoop/hbase/mapred/TableSplit.java |   2 +-
 .../apache/hadoop/hbase/mapreduce/CellCreator.java |   8 +-
 .../apache/hadoop/hbase/mapreduce/HashTable.java   |   2 +-
 .../apache/hadoop/hbase/mapreduce/ImportTsv.java   |   6 +-
 .../mapreduce/MultiTableHFileOutputFormat.java     |   2 +-
 .../hbase/mapreduce/MultiTableOutputFormat.java    |  12 +-
 .../MultiTableSnapshotInputFormatImpl.java         |   8 +-
 .../apache/hadoop/hbase/mapreduce/RowCounter.java  |   2 +-
 .../hbase/mapreduce/TableInputFormatBase.java      |   7 +-
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java |   4 +-
 .../hadoop/hbase/mapreduce/TableOutputFormat.java  |   3 +-
 .../hadoop/hbase/mapreduce/TableRecordReader.java  |   3 +-
 .../hadoop/hbase/mapreduce/TextSortReducer.java    |   4 +-
 .../hadoop/hbase/mapreduce/TsvImporterMapper.java  |   4 +-
 .../hbase/mapreduce/TsvImporterTextMapper.java     |   4 +-
 .../apache/hadoop/hbase/PerformanceEvaluation.java |  13 +-
 .../hadoop/hbase/mapred/TestTableInputFormat.java  |  18 +--
 .../mapreduce/MultiTableInputFormatTestBase.java   |   2 +-
 .../mapreduce/TestCellBasedHFileOutputFormat2.java |   8 +-
 .../mapreduce/TestCellBasedImportExport2.java      |  16 +-
 .../hbase/mapreduce/TestCellBasedWALPlayer2.java   |   2 +-
 .../hadoop/hbase/mapreduce/TestCopyTable.java      |   2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    |   8 +-
 .../hadoop/hbase/mapreduce/TestImportExport.java   |  16 +-
 .../TestImportTSVWithOperationAttributes.java      |   8 +-
 .../TestImportTSVWithVisibilityLabels.java         |   6 +-
 .../hadoop/hbase/mapreduce/TestImportTsv.java      |   2 +-
 .../mapreduce/TestMultiTableInputFormatBase.java   |   2 +-
 .../mapreduce/TestMultithreadedTableMapper.java    |   7 +-
 .../hadoop/hbase/mapreduce/TestRowCounter.java     |  26 ++--
 .../hbase/mapreduce/TestTableInputFormat.java      |  25 ++--
 .../hadoop/hbase/mapreduce/TestTableMapReduce.java |   4 +-
 .../hbase/mapreduce/TestTableMapReduceBase.java    |   5 +-
 .../store/wal/ProcedureWALPrettyPrinter.java       |   3 +-
 .../hadoop/hbase/replication/ReplicationPeers.java |   2 +-
 .../hbase/replication/ReplicationTracker.java      |   2 +-
 .../apache/hadoop/hbase/rest/ExistsResource.java   |   2 +-
 .../apache/hadoop/hbase/rest/MultiRowResource.java |   2 +-
 .../hbase/rest/NamespacesInstanceResource.java     |   4 +-
 .../hadoop/hbase/rest/NamespacesResource.java      |   2 +-
 .../hadoop/hbase/rest/ProtobufMessageHandler.java  |   2 +-
 .../org/apache/hadoop/hbase/rest/RESTServlet.java  |   2 +-
 .../apache/hadoop/hbase/rest/RegionsResource.java  |   2 +-
 .../org/apache/hadoop/hbase/rest/RootResource.java |   2 +-
 .../org/apache/hadoop/hbase/rest/RowResource.java  |   2 +-
 .../apache/hadoop/hbase/rest/ScannerResource.java  |   2 +-
 .../apache/hadoop/hbase/rest/SchemaResource.java   |   2 +-
 .../hbase/rest/StorageClusterStatusResource.java   |   2 +-
 .../hbase/rest/StorageClusterVersionResource.java  |   2 +-
 .../apache/hadoop/hbase/rest/TableResource.java    |   2 +-
 .../apache/hadoop/hbase/rest/VersionResource.java  |   2 +-
 .../apache/hadoop/hbase/rest/client/Client.java    |  42 +++---
 .../rest/filter/RestCsrfPreventionFilter.java      |   2 +-
 .../apache/hadoop/hbase/rest/model/CellModel.java  |  10 +-
 .../hbase/rest/model/NamespacesInstanceModel.java  |   4 +-
 .../hadoop/hbase/rest/model/NamespacesModel.java   |   2 +-
 .../hadoop/hbase/rest/model/ScannerModel.java      |   6 +-
 .../hadoop/hbase/rest/model/TableInfoModel.java    |   2 +-
 .../apache/hadoop/hbase/rest/model/TableModel.java |   2 +-
 .../hadoop/hbase/rest/client/RemoteAdmin.java      |  16 +-
 .../hadoop/hbase/rest/client/TestRemoteTable.java  |  41 +++++
 .../java/org/apache/hadoop/hbase/HealthReport.java |   4 +-
 .../org/apache/hadoop/hbase/LocalHBaseCluster.java |  14 +-
 .../apache/hadoop/hbase/RegionStateListener.java   |   4 +-
 .../java/org/apache/hadoop/hbase/SplitLogTask.java |   4 +-
 .../apache/hadoop/hbase/backup/HFileArchiver.java  |   7 +-
 .../hadoop/hbase/client/VersionInfoUtil.java       |   3 +-
 .../hadoop/hbase/constraint/Constraints.java       |  85 +++++------
 .../coordination/SplitLogManagerCoordination.java  |   2 +-
 .../ZKSplitLogManagerCoordination.java             |   4 +-
 .../coordination/ZkSplitLogWorkerCoordination.java |   4 +-
 .../hadoop/hbase/coprocessor/CoprocessorHost.java  |   2 +-
 .../hadoop/hbase/coprocessor/MasterObserver.java   |  49 +++---
 .../hadoop/hbase/coprocessor/RegionObserver.java   |  18 +--
 .../hbase/errorhandling/ForeignException.java      |   8 +-
 .../hbase/errorhandling/ForeignExceptionSnare.java |   2 +-
 .../apache/hadoop/hbase/executor/EventHandler.java |   2 +-
 .../hadoop/hbase/executor/ExecutorService.java     |   2 +-
 .../hbase/favored/FavoredNodeAssignmentHelper.java |  14 +-
 .../hadoop/hbase/favored/FavoredNodesPlan.java     |   2 +-
 .../java/org/apache/hadoop/hbase/io/Reference.java |  21 +--
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |   2 +-
 .../hadoop/hbase/io/hfile/BlockCacheUtil.java      |  12 +-
 .../hbase/io/hfile/CacheableDeserializer.java      |   2 +-
 .../hadoop/hbase/io/hfile/CompoundBloomFilter.java |   2 +-
 .../hbase/io/hfile/CompoundBloomFilterWriter.java  |   6 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java     |  65 ++++----
 .../hbase/io/hfile/HFileDataBlockEncoder.java      |   6 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java     |  13 +-
 .../apache/hadoop/hbase/io/hfile/HFileScanner.java |  21 +--
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java     |   6 +-
 .../hadoop/hbase/io/hfile/InlineBlockWriter.java   |   9 +-
 .../hbase/io/hfile/bucket/BucketAllocator.java     |   5 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |   5 +-
 .../hbase/io/hfile/bucket/ByteBufferIOEngine.java  |   6 +-
 .../hadoop/hbase/io/hfile/bucket/FileIOEngine.java |  10 +-
 .../hbase/io/hfile/bucket/FileMmapIOEngine.java    |   6 +-
 .../hadoop/hbase/io/hfile/bucket/IOEngine.java     |   6 +-
 .../hadoop/hbase/io/util/MemorySizeUtil.java       |   9 +-
 .../apache/hadoop/hbase/ipc/PriorityFunction.java  |   6 +-
 .../java/org/apache/hadoop/hbase/ipc/RpcCall.java  |   2 +-
 .../apache/hadoop/hbase/ipc/RpcCallContext.java    |   2 +-
 .../hadoop/hbase/ipc/RpcSchedulerContext.java      |   3 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java     |  13 +-
 .../hadoop/hbase/ipc/RpcServerInterface.java       |   2 +-
 .../hadoop/hbase/ipc/ServerRpcConnection.java      |  10 +-
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java       |   6 +-
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java   |  12 +-
 .../hadoop/hbase/ipc/SimpleRpcServerResponder.java |   6 +-
 .../hbase/ipc/SimpleServerRpcConnection.java       |   2 +-
 .../hbase/master/AssignmentVerificationReport.java |   5 +-
 .../hadoop/hbase/master/DrainingServerTracker.java |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |   3 +-
 .../apache/hadoop/hbase/master/LoadBalancer.java   |  20 +--
 .../hadoop/hbase/master/MasterCoprocessorHost.java |  24 +--
 .../hadoop/hbase/master/MasterFileSystem.java      |  10 +-
 .../hadoop/hbase/master/MasterRpcServices.java     |  10 +-
 .../apache/hadoop/hbase/master/MasterServices.java |  42 +++---
 .../hadoop/hbase/master/MasterWalManager.java      |   7 +-
 .../hbase/master/MetricsAssignmentManager.java     |   8 +-
 .../hbase/master/RegionPlacementMaintainer.java    |  17 ++-
 .../apache/hadoop/hbase/master/ServerManager.java  |  22 ++-
 .../master/SnapshotOfRegionAssignmentFromMeta.java |   2 +-
 .../hadoop/hbase/master/SplitLogManager.java       |   8 +-
 .../hadoop/hbase/master/TableNamespaceManager.java |   2 +-
 .../assignment/MergeTableRegionsProcedure.java     |   2 +-
 .../hbase/master/balancer/LoadBalancerFactory.java |   3 +-
 .../master/balancer/RegionLocationFinder.java      |   2 +-
 .../hadoop/hbase/master/cleaner/CleanerChore.java  |   3 -
 .../master/procedure/CloneSnapshotProcedure.java   |  10 +-
 .../master/procedure/CreateNamespaceProcedure.java |  12 +-
 .../master/procedure/DeleteNamespaceProcedure.java |  20 +--
 .../master/procedure/EnableTableProcedure.java     |  12 +-
 .../master/procedure/ModifyNamespaceProcedure.java |   8 +-
 .../master/procedure/ModifyTableProcedure.java     |   2 +-
 .../master/procedure/RestoreSnapshotProcedure.java |  10 +-
 .../hbase/master/snapshot/SnapshotManager.java     |  29 ++--
 .../hadoop/hbase/mob/DefaultMobStoreFlusher.java   |   2 +-
 .../java/org/apache/hadoop/hbase/mob/MobFile.java  |  12 +-
 .../org/apache/hadoop/hbase/mob/MobFileCache.java  |   2 +-
 .../org/apache/hadoop/hbase/mob/MobFileName.java   |  31 ++--
 .../java/org/apache/hadoop/hbase/mob/MobUtils.java |  10 +-
 .../hadoop/hbase/mob/compactions/MobCompactor.java |   6 +-
 .../hadoop/hbase/monitoring/ThreadMonitoring.java  |   2 +-
 .../hbase/namespace/NamespaceStateManager.java     |   4 +-
 .../hbase/procedure/MasterProcedureManager.java    |   8 +-
 .../apache/hadoop/hbase/procedure/Procedure.java   |  14 +-
 .../hbase/procedure/ProcedureCoordinator.java      |  13 +-
 .../hbase/procedure/ProcedureCoordinatorRpcs.java  |   4 +-
 .../hadoop/hbase/procedure/ProcedureMember.java    |  12 +-
 .../procedure/RegionServerProcedureManager.java    |   4 +-
 .../hadoop/hbase/procedure/Subprocedure.java       |  12 +-
 .../hadoop/hbase/procedure/ZKProcedureUtil.java    |   2 +-
 .../RegionServerFlushTableProcedureManager.java    |  11 +-
 .../hbase/protobuf/ReplicationProtbufUtil.java     |   4 +-
 .../hbase/regionserver/AbstractMemStore.java       |   2 +-
 .../AnnotationReadingPriorityFunction.java         |  15 +-
 .../apache/hadoop/hbase/regionserver/CellSink.java |   2 +-
 .../hadoop/hbase/regionserver/ChunkCreator.java    |   2 +-
 .../hbase/regionserver/CompactingMemStore.java     |   2 +-
 .../hbase/regionserver/FavoredNodesForRegion.java  |   6 +-
 .../hadoop/hbase/regionserver/FlushRequester.java  |   8 +-
 .../hadoop/hbase/regionserver/HMobStore.java       |  16 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  84 +++++------
 .../hbase/regionserver/HRegionFileSystem.java      |  51 ++++---
 .../hadoop/hbase/regionserver/HRegionServer.java   |  12 +-
 .../hadoop/hbase/regionserver/HStoreFile.java      |   9 +-
 .../hadoop/hbase/regionserver/HeapMemoryTuner.java |   4 +-
 .../hadoop/hbase/regionserver/InternalScan.java    |   2 +-
 .../hadoop/hbase/regionserver/InternalScanner.java |   6 +-
 .../hadoop/hbase/regionserver/KeyValueHeap.java    |  14 +-
 .../hadoop/hbase/regionserver/KeyValueScanner.java |   2 +-
 .../apache/hadoop/hbase/regionserver/MemStore.java |  17 ++-
 .../hadoop/hbase/regionserver/MemStoreFlusher.java |  20 ++-
 .../regionserver/MiniBatchOperationInProgress.java |  16 +-
 .../MultiVersionConcurrencyControl.java            |   4 +-
 .../hbase/regionserver/MutableOnlineRegions.java   |   2 +-
 .../hadoop/hbase/regionserver/OnlineRegions.java   |   8 +-
 .../hadoop/hbase/regionserver/OperationStatus.java |   9 +-
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  32 ++--
 .../apache/hadoop/hbase/regionserver/Region.java   |  33 ++--
 .../hbase/regionserver/RegionCoprocessorHost.java  |  36 ++---
 .../hadoop/hbase/regionserver/RegionScanner.java   |   2 +-
 .../hbase/regionserver/RegionSplitPolicy.java      |   3 +-
 .../hbase/regionserver/ReplicationSinkService.java |   1 -
 .../hbase/regionserver/ReversedKeyValueHeap.java   |   8 +-
 .../regionserver/ReversedRegionScannerImpl.java    |   3 +-
 .../hbase/regionserver/ReversedStoreScanner.java   |   5 +-
 .../hadoop/hbase/regionserver/RowProcessor.java    |   4 +-
 .../apache/hadoop/hbase/regionserver/ScanInfo.java |   4 +-
 .../hadoop/hbase/regionserver/ScannerContext.java  |  32 ++--
 .../hbase/regionserver/SecureBulkLoadManager.java  |   4 +-
 .../hadoop/hbase/regionserver/SegmentFactory.java  |   2 +-
 .../hadoop/hbase/regionserver/ShipperListener.java |   2 +-
 .../hadoop/hbase/regionserver/ShutdownHook.java    |   6 +-
 .../apache/hadoop/hbase/regionserver/Store.java    |   2 +-
 .../hadoop/hbase/regionserver/StoreFileInfo.java   |   4 +-
 .../hbase/regionserver/StoreFileManager.java       |   4 +-
 .../hadoop/hbase/regionserver/StoreFileReader.java |   6 +-
 .../hbase/regionserver/StoreFileScanner.java       |   4 +-
 .../hbase/regionserver/StoreFlushContext.java      |   6 +-
 .../hadoop/hbase/regionserver/StoreFlusher.java    |   4 +-
 .../hadoop/hbase/regionserver/StoreScanner.java    |  19 +--
 .../hbase/regionserver/TimeRangeTracker.java       |   2 +-
 .../compactions/CompactionProgress.java            |   2 +-
 .../regionserver/compactions/DefaultCompactor.java |   2 +-
 .../compactions/ExploringCompactionPolicy.java     |   8 +-
 .../compactions/SortedCompactionPolicy.java        |   4 +-
 .../regionserver/querymatcher/ColumnTracker.java   |   7 +-
 .../querymatcher/ScanDeleteTracker.java            |   2 +-
 .../querymatcher/ScanQueryMatcher.java             |   6 +-
 .../querymatcher/ScanWildcardColumnTracker.java    |   2 +-
 .../snapshot/RegionServerSnapshotManager.java      |  16 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java      |   4 +-
 .../hbase/regionserver/wal/ProtobufLogReader.java  |   2 +-
 .../regionserver/wal/SequenceIdAccounting.java     |   7 +-
 .../hadoop/hbase/regionserver/wal/SyncFuture.java  |   2 +-
 .../hbase/regionserver/wal/WALActionsListener.java |   7 +-
 .../replication/HBaseReplicationEndpoint.java      |   2 +-
 .../regionserver/DumpReplicationQueues.java        |   2 +-
 .../replication/regionserver/MetricsSink.java      |  10 +-
 .../replication/regionserver/MetricsSource.java    |  18 +--
 .../replication/regionserver/Replication.java      |   1 -
 .../replication/regionserver/ReplicationLoad.java  |   2 +-
 .../replication/regionserver/ReplicationSink.java  |  10 +-
 .../regionserver/ReplicationSinkManager.java       |   6 +-
 .../regionserver/ReplicationSourceManager.java     |   2 +-
 .../hbase/security/access/AccessChecker.java       |   2 +-
 .../hbase/security/access/AccessController.java    |   8 +-
 .../hadoop/hbase/security/access/AuthManager.java  |   2 +-
 .../hbase/security/access/ZKPermissionWatcher.java |   4 +-
 .../DefaultVisibilityLabelServiceImpl.java         |   5 +-
 .../security/visibility/ScanLabelGenerator.java    |   3 +-
 .../security/visibility/VisibilityController.java  |   4 +-
 .../visibility/VisibilityLabelService.java         |  43 +++---
 .../visibility/VisibilityLabelServiceManager.java  |   5 +-
 .../security/visibility/VisibilityLabelsCache.java |   6 +-
 .../hbase/security/visibility/VisibilityUtils.java |  27 ++--
 .../visibility/ZKVisibilityLabelWatcher.java       |   4 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java      |   6 +-
 .../hbase/snapshot/SnapshotDescriptionUtils.java   |   3 +-
 .../org/apache/hadoop/hbase/util/BloomContext.java |   4 +-
 .../org/apache/hadoop/hbase/util/BloomFilter.java  |   4 +-
 .../apache/hadoop/hbase/util/BloomFilterChunk.java |   2 +-
 .../hadoop/hbase/util/BloomFilterFactory.java      |  18 +--
 .../apache/hadoop/hbase/util/BloomFilterUtil.java  |  27 ++--
 .../hadoop/hbase/util/DirectMemoryUtils.java       |   4 +-
 .../apache/hadoop/hbase/util/EncryptionTest.java   |   3 +-
 .../hadoop/hbase/util/FSTableDescriptors.java      |   4 +-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  56 ++++---
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |  20 +--
 .../apache/hadoop/hbase/util/HBaseFsckRepair.java  |   2 +-
 .../apache/hadoop/hbase/util/JVMClusterUtil.java   |  11 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java       |   8 +-
 .../hadoop/hbase/util/MunkresAssignment.java       |   2 +-
 .../org/apache/hadoop/hbase/util/RegionMover.java  |   6 +-
 .../hadoop/hbase/util/RegionSplitCalculator.java   |   2 +-
 .../apache/hadoop/hbase/util/RegionSplitter.java   |  28 ++--
 .../hadoop/hbase/util/RollingStatCalculator.java   |  10 +-
 .../apache/hadoop/hbase/util/ZKDataMigrator.java   |   3 +-
 .../hbase/util/hbck/HFileCorruptionChecker.java    |  22 +--
 .../main/java/org/apache/hadoop/hbase/wal/WAL.java |  10 +-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java    |  12 +-
 .../apache/hadoop/hbase/wal/WALPrettyPrinter.java  |  53 ++++---
 .../java/org/apache/hadoop/hbase/HBaseCluster.java |   2 +-
 .../org/apache/hadoop/hbase/HBaseTestCase.java     |  21 +--
 .../apache/hadoop/hbase/HBaseTestingUtility.java   | 166 ++++++++++++---------
 .../hadoop/hbase/HFilePerformanceEvaluation.java   |  16 +-
 .../org/apache/hadoop/hbase/MetaMockingUtil.java   |   6 +-
 .../org/apache/hadoop/hbase/MiniHBaseCluster.java  |  32 ++--
 .../hadoop/hbase/TestGlobalMemStoreSize.java       |   4 +-
 .../TestHColumnDescriptorDefaultVersions.java      |   2 +-
 .../org/apache/hadoop/hbase/TestMultiVersions.java |   2 +-
 .../hbase/TestPartialResultsFromClientSide.java    |  28 ++--
 .../apache/hadoop/hbase/TestRegionRebalancing.java |   2 +-
 .../org/apache/hadoop/hbase/TestSerialization.java |   2 +-
 .../org/apache/hadoop/hbase/TimestampTestBase.java |  11 +-
 .../hadoop/hbase/backup/TestHFileArchiving.java    |   6 +-
 .../hadoop/hbase/client/FromClientSideBase.java    |   8 +-
 .../hbase/client/HConnectionTestingUtility.java    |   6 +-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |  12 +-
 .../hadoop/hbase/client/TestClientTimeouts.java    |   2 +-
 .../hbase/client/TestConnectionImplementation.java |   6 +-
 .../hadoop/hbase/client/TestEnableTable.java       |   2 +-
 .../hadoop/hbase/client/TestFromClientSide3.java   |   2 +-
 .../client/TestFromClientSideScanExcpetion.java    |   2 +-
 .../hbase/client/TestIntraRowPagination.java       |   2 +-
 .../apache/hadoop/hbase/client/TestMetaCache.java  |   2 +-
 .../org/apache/hadoop/hbase/client/TestResult.java |   5 +-
 .../hadoop/hbase/client/TestScannerTimeout.java    |   6 +-
 .../hbase/client/TestScannersFromClientSide.java   |  12 +-
 .../hadoop/hbase/client/TestSizeFailures.java      |   2 +-
 .../hbase/client/TestSmallReversedScanner.java     |   4 +-
 .../hbase/client/TestSnapshotFromClient.java       |   5 +-
 .../hadoop/hbase/client/TestSnapshotMetadata.java  |   2 +-
 .../hadoop/hbase/client/TestTimestampsFilter.java  |   4 +-
 .../hbase/client/locking/TestEntityLocks.java      |   2 +-
 .../client/replication/TestReplicationAdmin.java   |   3 +-
 .../hadoop/hbase/constraint/TestConstraint.java    |  10 +-
 .../hadoop/hbase/constraint/TestConstraints.java   |   6 +-
 .../coprocessor/TestCoreMasterCoprocessor.java     |   2 +-
 .../coprocessor/TestCoreRegionCoprocessor.java     |   2 +-
 .../TestCoreRegionServerCoprocessor.java           |   2 +-
 .../coprocessor/TestOpenTableInCoprocessor.java    |   2 +-
 .../coprocessor/TestRegionObserverBypass.java      |   4 +-
 ...ObserverForAddingMutationsFromCoprocessors.java |   2 +-
 .../coprocessor/TestRegionObserverInterface.java   |   2 +-
 .../hbase/filter/TestColumnPaginationFilter.java   |   4 +-
 .../hbase/filter/TestDependentColumnFilter.java    |   4 +-
 .../org/apache/hadoop/hbase/filter/TestFilter.java |   8 +-
 .../apache/hadoop/hbase/filter/TestFilterList.java |  14 +-
 .../filter/TestFilterListOrOperatorWithBlkCnt.java |   6 +-
 .../TestFirstKeyValueMatchingQualifiersFilter.java |   2 +-
 .../hbase/filter/TestInclusiveStopFilter.java      |   4 +-
 .../hbase/filter/TestMultiRowRangeFilter.java      |   6 +-
 .../apache/hadoop/hbase/filter/TestPageFilter.java |   4 +-
 .../hadoop/hbase/filter/TestRandomRowFilter.java   |   4 +-
 .../filter/TestSingleColumnValueExcludeFilter.java |   2 +-
 .../hbase/filter/TestSingleColumnValueFilter.java  |   4 +-
 .../hadoop/hbase/io/TestHalfStoreFileReader.java   |   2 +-
 .../org/apache/hadoop/hbase/io/TestHeapSize.java   |   2 +-
 .../hbase/io/encoding/TestDataBlockEncoders.java   |   6 +-
 .../apache/hadoop/hbase/io/hfile/NanoTimer.java    |   6 +-
 .../hadoop/hbase/io/hfile/RandomDistribution.java  |  15 +-
 .../hadoop/hbase/io/hfile/RandomKeyValueUtil.java  |   3 +-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java    |   4 +-
 .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java |   3 +-
 .../hbase/io/hfile/TestHFileDataBlockEncoder.java  |   4 +-
 .../hbase/io/hfile/bucket/TestBucketCache.java     |   3 +-
 .../io/hfile/bucket/TestBucketCacheRefCnt.java     |   2 -
 .../io/hfile/bucket/TestBucketWriterThread.java    |   8 +-
 .../hadoop/hbase/master/MockRegionServer.java      |   5 +-
 .../hbase/master/TestActiveMasterManager.java      |   2 +-
 .../TestMasterFailoverBalancerPersistence.java     |   6 +-
 .../hadoop/hbase/master/TestMasterNoCluster.java   |   6 +-
 .../hadoop/hbase/master/TestMasterTransitions.java |  10 +-
 .../hadoop/hbase/master/TestRegionPlacement.java   |  10 +-
 .../hbase/master/balancer/BalancerTestBase.java    |   4 +-
 .../master/balancer/TestBaseLoadBalancer.java      |   7 +-
 .../master/balancer/TestSimpleLoadBalancer.java    |   2 +-
 .../hbase/master/janitor/TestCatalogJanitor.java   |   2 +-
 .../janitor/TestCatalogJanitorInMemoryStates.java  |   4 +-
 .../procedure/MasterProcedureTestingUtility.java   |   2 +-
 .../TestTableDescriptorModificationFromClient.java |   2 +-
 .../org/apache/hadoop/hbase/mob/MobTestUtil.java   |   2 +-
 .../hbase/procedure/SimpleRSProcedureManager.java  |   2 +-
 .../procedure/TestZKProcedureControllers.java      |   4 +-
 .../hadoop/hbase/protobuf/TestProtobufUtil.java    |  12 +-
 .../hbase/protobuf/TestReplicationProtobuf.java    |   2 +-
 .../hbase/regionserver/CreateRandomStoreFile.java  |   2 +-
 .../hbase/regionserver/DataBlockEncodingTool.java  |   4 +-
 .../hbase/regionserver/TestCompactingMemStore.java |  10 +-
 .../hadoop/hbase/regionserver/TestCompaction.java  |   4 +-
 .../hbase/regionserver/TestCompactionState.java    |   5 +-
 .../TestDateTieredCompactionPolicy.java            |   2 +-
 .../hbase/regionserver/TestDefaultMemStore.java    |  20 +--
 .../hbase/regionserver/TestDeleteMobTable.java     |   2 +-
 .../regionserver/TestGetClosestAtOrBefore.java     |   4 +-
 .../hadoop/hbase/regionserver/TestHMobStore.java   |  16 +-
 .../hadoop/hbase/regionserver/TestHRegion.java     |  29 ++--
 .../regionserver/TestHRegionReplayEvents.java      |   4 +-
 .../hadoop/hbase/regionserver/TestHStore.java      |   2 +-
 .../hadoop/hbase/regionserver/TestHStoreFile.java  |   2 +-
 .../hbase/regionserver/TestJoinedScanners.java     |   3 +-
 .../hbase/regionserver/TestMajorCompaction.java    |   4 +-
 .../regionserver/TestMasterAddressTracker.java     |   2 +-
 .../regionserver/TestPerColumnFamilyFlush.java     |   2 +-
 .../regionserver/TestRSKilledWhenInitializing.java |   2 +-
 .../hbase/regionserver/TestRegionIncrement.java    |   4 +-
 .../regionserver/TestRegionReplicaFailover.java    |   2 +-
 .../TestRegionServerOnlineConfigChange.java        |   4 +-
 .../regionserver/TestRequestsPerSecondMetric.java  |   2 +-
 .../hadoop/hbase/regionserver/TestRowTooBig.java   |   4 +-
 .../hadoop/hbase/regionserver/TestScanner.java     |   2 +-
 .../TestSplitTransactionOnCluster.java             |   7 +-
 .../hbase/regionserver/TestStoreScanner.java       |   4 +-
 .../querymatcher/TestUserScanQueryMatcher.java     |   6 +-
 .../hbase/regionserver/wal/AbstractTestFSWAL.java  |   8 +-
 .../regionserver/wal/AbstractTestProtobufLog.java  |   4 +-
 .../regionserver/wal/AbstractTestWALReplay.java    |  20 ++-
 .../hbase/regionserver/wal/TestLogRolling.java     |   2 +-
 .../regionserver/wal/TestLogRollingNoCluster.java  |   2 +-
 .../TestReplicationDisableInactivePeer.java        |   2 +-
 .../regionserver/TestReplicationSink.java          |  10 +-
 .../regionserver/TestReplicationSourceManager.java |   8 +-
 .../regionserver/TestWALEntrySinkFilter.java       |   2 +-
 .../hadoop/hbase/security/TestSecureIPC.java       |   6 +-
 .../TestUsersOperationsWithSecureHadoop.java       |   2 +-
 .../hbase/snapshot/MobSnapshotTestingUtils.java    |   3 +-
 .../hbase/snapshot/SnapshotTestingUtils.java       |   2 +-
 .../TestLoadIncrementalHFilesSplitRecovery.java    |   4 +-
 .../hadoop/hbase/util/BaseTestHBaseFsck.java       |  13 +-
 .../hadoop/hbase/util/HFileArchiveTestingUtil.java |   4 +-
 .../org/apache/hadoop/hbase/util/MockServer.java   |   2 +-
 .../hbase/util/ProcessBasedLocalHBaseCluster.java  |   2 +-
 .../org/apache/hadoop/hbase/util/TestFSUtils.java  |   2 +-
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java    |   2 +-
 .../hbase/util/test/LoadTestDataGenerator.java     |  10 +-
 .../apache/hadoop/hbase/wal/IOTestProvider.java    |   2 +-
 .../hadoop/hbase/wal/TestFSHLogProvider.java       |   7 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java    |   6 +-
 .../apache/hadoop/hbase/wal/TestWALMethods.java    |   2 +-
 .../hbase/wal/TestWALOpenAfterDNRollingStart.java  |   2 +-
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java  |   4 +-
 .../hadoop/hbase/wal/WALPerformanceEvaluation.java |   5 +-
 .../hadoop/hbase/thrift/HBaseServiceHandler.java   |   2 +-
 .../hadoop/hbase/thrift/IncrementCoalescer.java    |   2 +-
 .../hbase/thrift/ThriftHBaseServiceHandler.java    |   2 +-
 .../hadoop/hbase/thrift/ThriftUtilities.java       |  20 +--
 .../hadoop/hbase/thrift/TestThriftServer.java      |  20 +--
 .../TestThriftHBaseServiceHandlerWithLabels.java   |   5 +-
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   |   4 +-
 .../hbase/zookeeper/RecoverableZooKeeper.java      |   5 +-
 .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java  |  30 ++--
 612 files changed, 2991 insertions(+), 2886 deletions(-)
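For context, the broken 'n's cleaned up here appear to stem from an earlier mass reformatting that collapsed javadoc line breaks into literal "n" characters, fusing descriptions into @param/@return tags. A representative before/after sketch of the fix, using a hypothetical method purely for illustration:

    // Before: a stray " n * " sits where a line break used to be,
    // running the description into the @param tag.
    /**
     * Sets the widget name. n * @param name the new name
     * @return this (for chained invocation)
     */
    Widget setName(String name);

    // After: the stray 'n' is removed and each javadoc tag
    // is restored to its own line.
    /**
     * Sets the widget name.
     * @param name the new name
     * @return this (for chained invocation)
     */
    Widget setName(String name);

The hunks below apply this same mechanical cleanup across the files listed above.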

diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
index ee02d42d2d3..469fc38ad19 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
@@ -367,7 +367,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
        * Create a ByteString from byte array without copying (wrap), and then set it as the payload
        * for the builder.
        * @param builder builder for HDFS DataTransferEncryptorMessage.
-       * @param payload byte array of payload. n
+       * @param payload byte array of payload.
        */
       static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
         byte[] payload) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
index 414d4ee7b49..8029b0314cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
@@ -102,8 +102,8 @@ public class AsyncMetaTableAccessor {
   }
 
   /**
-   * Returns the HRegionLocation from meta for the given region n * @param regionName region we're
-   * looking for
+   * Returns the HRegionLocation from meta for the given region
+   * @param regionName region we're looking for
    * @return HRegionLocation for the given region
    */
   public static CompletableFuture<Optional<HRegionLocation>>
@@ -128,8 +128,8 @@ public class AsyncMetaTableAccessor {
   }
 
   /**
-   * Returns the HRegionLocation from meta for the given encoded region name n * @param
-   * encodedRegionName region we're looking for
+   * Returns the HRegionLocation from meta for the given encoded region name
+   * @param encodedRegionName region we're looking for
    * @return HRegionLocation for the given region
    */
   public static CompletableFuture<Optional<HRegionLocation>>
@@ -176,8 +176,8 @@ public class AsyncMetaTableAccessor {
   }
 
   /**
-   * Used to get all region locations for the specific table. n * @param tableName table we're
-   * looking for, can be null for getting all regions
+   * Used to get all region locations for the specific table.
+   * @param tableName table we're looking for, can be null for getting all regions
    * @return the list of region locations. The return value will be wrapped by a
    *         {@link CompletableFuture}.
    */
@@ -200,8 +200,8 @@ public class AsyncMetaTableAccessor {
   }
 
   /**
-   * Used to get table regions' info and server. n * @param tableName table we're looking for, can
-   * be null for getting all regions
+   * Used to get table regions' info and server.
+   * @param tableName                   table we're looking for, can be null for getting all regions
    * @param excludeOfflinedSplitParents don't return split parents
    * @return the list of regioninfos and server. The return value will be wrapped by a
    *         {@link CompletableFuture}.
@@ -259,9 +259,10 @@ public class AsyncMetaTableAccessor {
   }
 
   /**
-   * Performs a scan of META table for given table. n * @param tableName table withing we scan
-   * @param type    scanned part of meta
-   * @param visitor Visitor invoked against each row
+   * Performs a scan of META table for given table.
+   * @param tableName table withing we scan
+   * @param type      scanned part of meta
+   * @param visitor   Visitor invoked against each row
    */
   private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
     TableName tableName, QueryType type, final Visitor visitor) {
@@ -270,11 +271,12 @@ public class AsyncMetaTableAccessor {
   }
 
   /**
-   * Performs a scan of META table for given table. n * @param startRow Where to start the scan
-   * @param stopRow Where to stop the scan
-   * @param type    scanned part of meta
-   * @param maxRows maximum rows to return
-   * @param visitor Visitor invoked against each row
+   * Performs a scan of META table for given table.
+   * @param startRow Where to start the scan
+   * @param stopRow  Where to stop the scan
+   * @param type     scanned part of meta
+   * @param maxRows  maximum rows to return
+   * @param visitor  Visitor invoked against each row
    */
   private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
     byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
index 9b7a5de19bd..3b29e286561 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
@@ -52,7 +52,8 @@ public class ClusterId {
 
   /**
    * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
-   * @return An instance of {@link ClusterId} made from <code>bytes</code> n * @see #toByteArray()
+   * @return An instance of {@link ClusterId} made from <code>bytes</code>
+   * @see #toByteArray()
    */
   public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
     if (ProtobufUtil.isPBMagicPrefix(bytes)) {
@@ -78,9 +79,7 @@ public class ClusterId {
     return builder.setClusterId(this.id).build();
   }
 
-  /**
-   * n * @return A {@link ClusterId} made from the passed in <code>cid</code>
-   */
+  /** Returns A {@link ClusterId} made from the passed in <code>cid</code> */
   public static ClusterId convert(final ClusterIdProtos.ClusterId cid) {
     return new ClusterId(cid.getClusterId());
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index f21ed0e837b..d3a0c64f4fb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -269,7 +269,7 @@ public class ClusterStatus implements ClusterMetrics {
   }
 
   /**
-   * n * @return Server's load or null if not found.
+   * @return Server's load or null if not found.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link #getLiveServerMetrics} instead.
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 1ed87a38a4d..7b222e868bd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -358,8 +358,8 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
 
   /**
    * Set whether the tags should be compressed along with DataBlockEncoding. When no
-   * DataBlockEncoding is been used, this is having no effect. n * @return this (for chained
-   * invocation)
+   * DataBlockEncoding is been used, this is having no effect.
+   * @return this (for chained invocation)
    */
   public HColumnDescriptor setCompressTags(boolean value) {
     getDelegateeForModification().setCompressTags(value);
@@ -668,8 +668,8 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
 
   /**
    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
-   * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code> n * @see
-   *         #toByteArray()
+   * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
+   * @see #toByteArray()
    */
   public static HColumnDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
     ColumnFamilyDescriptor desc = ColumnFamilyDescriptorBuilder.parseFrom(bytes);
@@ -713,7 +713,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
   }
 
   /**
-   * Set the encryption algorithm for use with this family n
+   * Set the encryption algorithm for use with this family
    */
   public HColumnDescriptor setEncryptionType(String value) {
     getDelegateeForModification().setEncryptionType(value);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 651ce492d39..d89de53d1ff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -93,7 +93,7 @@ public class HRegionInfo implements RegionInfo {
   private static final int MAX_REPLICA_ID = 0xFFFF;
 
   /**
-   * n * @return the encodedName
+   * @return the encodedName
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#encodeRegionName(byte[])}.
    */
@@ -211,7 +211,7 @@ public class HRegionInfo implements RegionInfo {
    * Construct HRegionInfo with explicit parameters
    * @param tableName the table name
    * @param startKey  first key in region
-   * @param endKey    end of key range n
+   * @param endKey    end of key range
    */
   public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey)
     throws IllegalArgumentException {
@@ -224,7 +224,7 @@ public class HRegionInfo implements RegionInfo {
    * @param startKey  first key in region
    * @param endKey    end of key range
    * @param split     true if this region has split and we have daughter regions regions that may or
-   *                  may not hold references to this region. n
+   *                  may not hold references to this region.
    */
   public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
     final boolean split) throws IllegalArgumentException {
@@ -238,7 +238,7 @@ public class HRegionInfo implements RegionInfo {
    * @param endKey    end of key range
    * @param split     true if this region has split and we have daughter regions regions that may or
    *                  may not hold references to this region.
-   * @param regionid  Region id to use. n
+   * @param regionid  Region id to use.
    */
   public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
     final boolean split, final long regionid) throws IllegalArgumentException {
@@ -253,7 +253,7 @@ public class HRegionInfo implements RegionInfo {
    * @param split     true if this region has split and we have daughter regions regions that may or
    *                  may not hold references to this region.
    * @param regionid  Region id to use.
-   * @param replicaId the replicaId to use n
+   * @param replicaId the replicaId to use
    */
   public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
     final boolean split, final long regionid, final int replicaId) throws IllegalArgumentException {
@@ -279,7 +279,7 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Costruct a copy of another HRegionInfo n
+   * Costruct a copy of another HRegionInfo
    */
   public HRegionInfo(RegionInfo other) {
     super();
@@ -302,7 +302,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Make a region name of passed parameters. n * @param startKey Can be null
+   * Make a region name of passed parameters.
+   * @param startKey  Can be null
    * @param regionid  Region id (Usually timestamp from when region was created).
    * @param newFormat should we create the region name in the new format (such that it contains its
    *                  encoded name?).
@@ -318,7 +319,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Make a region name of passed parameters. n * @param startKey Can be null
+   * Make a region name of passed parameters.
+   * @param startKey  Can be null
    * @param id        Region id (Usually timestamp from when region was created).
    * @param newFormat should we create the region name in the new format (such that it contains its
    *                  encoded name?).
@@ -334,10 +336,11 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Make a region name of passed parameters. n * @param startKey Can be null
-   * @param regionid Region id (Usually timestamp from when region was created). n * @param
-   *                 newFormat should we create the region name in the new format (such that it
-   *                 contains its encoded name?).
+   * Make a region name of passed parameters.
+   * @param startKey  Can be null
+   * @param regionid  Region id (Usually timestamp from when region was created).
+   * @param newFormat should we create the region name in the new format (such that it contains its
+   *                  encoded name?).
    * @return Region name made of passed tableName, startKey, id and replicaId
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link RegionInfo#createRegionName(TableName, byte[], long, int, boolean)}.
@@ -351,7 +354,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Make a region name of passed parameters. n * @param startKey Can be null
+   * Make a region name of passed parameters.
+   * @param startKey  Can be null
    * @param id        Region id (Usually timestamp from when region was created).
    * @param newFormat should we create the region name in the new format (such that it contains its
    *                  encoded name?).
@@ -367,9 +371,10 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Make a region name of passed parameters. n * @param startKey Can be null
-   * @param id Region id (Usually timestamp from when region was created). n * @param newFormat
-   *           should we create the region name in the new format
+   * Make a region name of passed parameters.
+   * @param startKey  Can be null
+   * @param id        Region id (Usually timestamp from when region was created).
+   * @param newFormat should we create the region name in the new format
    * @return Region name made of passed tableName, startKey, id and replicaId
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link RegionInfo#createRegionName(TableName, byte[], byte[], int, boolean)}.
@@ -394,7 +399,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Gets the start key from the specified region name. n * @return Start key.
+   * Gets the start key from the specified region name.
+   * @return Start key.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#getStartKey(byte[])}.
    */
@@ -404,9 +410,10 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Separate elements of a regionName. n * @return Array of byte[] containing tableName, startKey
-   * and id n * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
-   * {@link RegionInfo#parseRegionName(byte[])}.
+   * Separate elements of a regionName.
+   * @return Array of byte[] containing tableName, startKey and id
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
+   *             {@link RegionInfo#parseRegionName(byte[])}.
    */
   @Deprecated
   @InterfaceAudience.Private
@@ -415,9 +422,9 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * n * @return if region name is encoded. n * @deprecated As of release 2.0.0, this will be
-   * removed in HBase 3.0.0 Use
-   * {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}.
+   * @return true if the region name is encoded.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
+   *             {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}.
    */
   @Deprecated
   public static boolean isEncodedRegionName(byte[] regionName) throws IOException {
@@ -483,7 +490,7 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get current table name of the region n
+   * Get current table name of the region
    */
   @Override
   public TableName getTable() {
@@ -725,7 +732,8 @@ public class HRegionInfo implements RegionInfo {
 
   /**
    * @param bytes A pb RegionInfo serialized with a pb magic prefix.
-   * @return A deserialized {@link HRegionInfo} n * @see #toByteArray()
+   * @return A deserialized {@link HRegionInfo}
+   * @see #toByteArray()
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[])}.
    */
@@ -738,7 +746,8 @@ public class HRegionInfo implements RegionInfo {
    * @param bytes  A pb RegionInfo serialized with a pb magic prefix.
    * @param offset starting point in the byte array
    * @param len    length to read on the byte array
-   * @return A deserialized {@link HRegionInfo} n * @see #toByteArray()
+   * @return A deserialized {@link HRegionInfo}
+   * @see #toByteArray()
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[], int, int)}.
    */
@@ -763,8 +772,8 @@ public class HRegionInfo implements RegionInfo {
   /**
    * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use the pb
    * mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want).
-   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n * @see
-   *         #toByteArray()
+   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
+   * @see #toByteArray()
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link RegionInfo#toDelimitedByteArray(RegionInfo)}.
    */
@@ -774,8 +783,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally nn
-   * * @return descriptive string
+   * Get the descriptive name as {@link RegionState} does it, optionally with the startkey hidden.
+   * @return descriptive string
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getDescriptiveNameFromRegionStateForDisplay(RegionState,
    *             Configuration) over in hbase-server module.
@@ -788,7 +797,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the end key for display. Optionally hide the real end key. nn * @return the endkey
+   * Get the end key for display. Optionally hide the real end key.
+   * @return the endkey
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getEndKeyForDisplay(RegionInfo, Configuration) over in
    *             hbase-server module.
@@ -800,7 +810,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the start key for display. Optionally hide the real start key. nn * @return the startkey
+   * Get the start key for display. Optionally hide the real start key.
+   * @return the startkey
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getStartKeyForDisplay(RegionInfo, Configuration) over in
    *             hbase-server module.
@@ -812,8 +823,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the region name for display. Optionally hide the start key. nn * @return region name as
-   * String
+   * Get the region name for display. Optionally hide the start key.
+   * @return region name as String
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getRegionNameAsStringForDisplay(RegionInfo, Configuration) over
    *             in hbase-server module.
@@ -825,7 +836,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the region name for display. Optionally hide the start key. nn * @return region name bytes
+   * Get the region name for display. Optionally hide the start key.
+   * @return region name bytes
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getRegionNameForDisplay(RegionInfo, Configuration) over in
    *             hbase-server module.
@@ -838,9 +850,10 @@ public class HRegionInfo implements RegionInfo {
 
   /**
    * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was
-   * serialized to the stream with {@link #toDelimitedByteArray()} n * @return An instance of
-   * HRegionInfo. n * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
-   * {@link RegionInfo#parseFrom(DataInputStream)}.
+   * serialized to the stream with {@link #toDelimitedByteArray()}
+   * @return An instance of HRegionInfo.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
+   *             {@link RegionInfo#parseFrom(DataInputStream)}.
    */
   @Deprecated
   @InterfaceAudience.Private
@@ -868,8 +881,8 @@ public class HRegionInfo implements RegionInfo {
    * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can be
    * used to read back the instances.
    * @param infos HRegionInfo objects to serialize
-   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n * @see
-   *         #toByteArray()
+   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
+   * @see #toByteArray()
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link RegionInfo#toDelimitedByteArray(RegionInfo...)}.
    */
@@ -910,7 +923,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Check whether two regions are adjacent nn * @return true if two regions are adjacent
+   * Check whether two regions are adjacent
+   * @return true if two regions are adjacent
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#areAdjacent(RegionInfo, RegionInfo)}.
    */
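
As several hunks above touch the region-name helpers, a minimal sketch of the
replacement RegionInfo statics that the deprecation notes point at may help; the
table name and start key below are made up, and note these methods are marked
@InterfaceAudience.Private, so they are not meant for general client code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionNameSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("demo");   // hypothetical table
        byte[] startKey = Bytes.toBytes("row-0000");   // startKey can also be null
        long regionId = System.currentTimeMillis();    // usually the creation timestamp
        // newFormat = true makes the region name carry its encoded name.
        byte[] name = RegionInfo.createRegionName(table, startKey, regionId, true);
        // parseRegionName separates the elements back out: tableName, startKey, id.
        byte[][] parts = RegionInfo.parseRegionName(name);
        System.out.println(Bytes.toStringBinary(parts[0]));
      }
    }
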
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index 6ae93bb3954..7a431535ec3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -82,7 +82,7 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
   }
 
   /**
-   * @return Immutable HRegionInfo
+   * Returns immutable HRegionInfo
    * @deprecated Since 2.0.0. Will remove in 3.0.0. Use {@link #getRegion()}} instead.
    */
   @Deprecated
@@ -90,9 +90,7 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
     return regionInfo == null ? null : new ImmutableHRegionInfo(regionInfo);
   }
 
-  /**
-   * n
-   */
+  /** Returns regionInfo */
   public RegionInfo getRegion() {
     return regionInfo;
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 3cd43ae70fd..c6af8e6ac53 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -353,7 +353,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
   }
 
   /**
-   * Get the name of the table n
+   * Get the name of the table
    */
   @Override
   public TableName getTableName() {
@@ -715,7 +715,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. It won't check if the class can be
    * loaded or not. Whether a coprocessor is loadable or not will be determined when a region is
    * opened.
-   * @param className Full class name. n
+   * @param className Full class name.
    */
   public HTableDescriptor addCoprocessor(String className) throws IOException {
     getDelegateeForModification().setCoprocessor(className);
@@ -731,7 +731,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    *                    classloader.
    * @param className   Full class name.
    * @param priority    Priority
-   * @param kvs         Arbitrary key-value parameter pairs passed into the coprocessor. n
+   * @param kvs         Arbitrary key-value parameter pairs passed into the coprocessor.
    */
   public HTableDescriptor addCoprocessor(String className, Path jarFilePath, int priority,
     final Map<String, String> kvs) throws IOException {
@@ -747,7 +747,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * loaded or not. Whether a coprocessor is loadable or not will be determined when a region is
    * opened.
    * @param specStr The Coprocessor specification all in one String formatted so it matches
-   *                {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN} n
+   *                {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
    */
   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
     getDelegateeForModification().setCoprocessorWithSpec(specStr);
@@ -828,8 +828,8 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
 
   /**
    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
-   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code> nn * @see
-   *         #toByteArray()
+   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
+   * @see #toByteArray()
    */
   public static HTableDescriptor parseFrom(final byte[] bytes)
     throws DeserializationException, IOException {
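
The addCoprocessor variants above are the deprecated path; a small sketch of the
TableDescriptorBuilder equivalent follows, with a made-up observer class name. As
the javadoc notes, loadability is still only checked when a region opens.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setCoprocessor("org.example.MyRegionObserver") // hypothetical class
          .build();
        System.out.println(td.hasCoprocessor("org.example.MyRegionObserver"));
      }
    }
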
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 01b5f49a205..69e59b5b3c3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -643,7 +643,7 @@ public class MetaTableAccessor {
   /**
    * @param connection connection we're using
    * @param serverName server whose regions we're interested in
-   * @return List of user regions installed on this server (does not include catalog regions). n
+   * @return List of user regions installed on this server (does not include catalog regions).
    */
   public static NavigableMap<RegionInfo, Result> getServerUserRegions(Connection connection,
     final ServerName serverName) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
index a15833ac17a..bc156353a1b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
@@ -34,8 +34,7 @@ public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException {
   }
 
   /**
-   * n
-   */
+   */
   public NotAllMetaRegionsOnlineException(String message) {
     super(message);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index e7c267dc332..58fe5800051 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -392,7 +392,7 @@ public class ServerLoad implements ServerMetrics {
   }
 
   /**
-   * Call directly from client such as hbase shell n
+   * Call directly from client such as hbase shell
    */
   @Override
   public ReplicationLoadSink getReplicationLoadSink() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index a177ecbf3e9..4ba044056b5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -70,7 +70,7 @@ public interface ServerMetrics {
   Map<String, List<ReplicationLoadSource>> getReplicationLoadSourceMap();
 
   /**
-   * Call directly from client such as hbase shell n
+   * Call directly from client such as hbase shell
    */
   @Nullable
   ReplicationLoadSink getReplicationLoadSink();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index e6698afd9a2..4e202afc61a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -532,9 +532,10 @@ public interface Admin extends Abortable, Closeable {
    * Disable table and wait on completion. May timeout eventually. Use
    * {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
    * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
-   * enabled state for it to be disabled. n * @throws IOException There could be couple types of
-   * IOException TableNotFoundException means the table doesn't exist. TableNotEnabledException
-   * means the table isn't in enabled state.
+   * enabled state for it to be disabled.
+   * @throws IOException There could be a couple of types of IOException: TableNotFoundException
+   *                     means the table doesn't exist, and TableNotEnabledException means the
+   *                     table isn't in enabled state.
    */
   default void disableTable(TableName tableName) throws IOException {
     get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
@@ -905,7 +906,7 @@ public interface Admin extends Abortable, Closeable {
    * then it returns. It does not wait on the completion of Compaction (it can take a while).
    * @param tableName   table to compact
    * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   void compact(TableName tableName, CompactType compactType)
     throws IOException, InterruptedException;
@@ -917,7 +918,7 @@ public interface Admin extends Abortable, Closeable {
    * @param tableName    table to compact
    * @param columnFamily column family within a table
    * @param compactType  {@link org.apache.hadoop.hbase.client.CompactType}
-   * @throws IOException if not a mob column family or if a remote or network exception occurs n
+   * @throws IOException if not a mob column family or if a remote or network exception occurs
    */
   void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
     throws IOException, InterruptedException;
@@ -966,7 +967,7 @@ public interface Admin extends Abortable, Closeable {
    * while).
    * @param tableName   table to compact
    * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   void majorCompact(TableName tableName, CompactType compactType)
     throws IOException, InterruptedException;
@@ -978,7 +979,7 @@ public interface Admin extends Abortable, Closeable {
    * @param tableName    table to compact
    * @param columnFamily column family within a table
    * @param compactType  {@link org.apache.hadoop.hbase.client.CompactType}
-   * @throws IOException if not a mob column family or if a remote or network exception occurs n
+   * @throws IOException if not a mob column family or if a remote or network exception occurs
    */
   void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
     throws IOException, InterruptedException;
@@ -989,10 +990,10 @@ public interface Admin extends Abortable, Closeable {
    * can take a while).
    * @param sn    the region server name
    * @param major if it's major compaction
-   * @throws IOException if a remote or network exception occurs n * @deprecated As of release
-   *                     2.0.0, this will be removed in HBase 3.0.0. Use
-   *                     {@link #compactRegionServer(ServerName)} or
-   *                     {@link #majorCompactRegionServer(ServerName)}.
+   * @throws IOException if a remote or network exception occurs
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
+   *             {@link #compactRegionServer(ServerName)} or
+   *             {@link #majorCompactRegionServer(ServerName)}.
    */
   @Deprecated
   default void compactRegionServer(ServerName sn, boolean major)
@@ -2562,7 +2563,7 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Return the set of supported security capabilities.
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   List<SecurityCapability> getSecurityCapabilities() throws IOException;
 
@@ -2906,7 +2907,7 @@ public interface Admin extends Abortable, Closeable {
    * Clear compacting queues on a regionserver.
    * @param serverName the region server name
    * @param queues     the set of queue name
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   void clearCompactionQueues(ServerName serverName, Set<String> queues)
     throws IOException, InterruptedException;
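
For the compact(TableName, CompactType) javadoc above, a brief sketch of a client
invocation; the table name is hypothetical and, as documented, the call returns
before the compaction itself finishes.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactType;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the servers to compact and returns; compaction runs in the
          // background and can take a while.
          admin.compact(TableName.valueOf("demo"), CompactType.NORMAL);
        }
      }
    }
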
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index bf1a29196db..6104fcee87b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -61,7 +61,7 @@ public class Append extends Mutation {
    * <p>
    * This range is used as [minStamp, maxStamp).
    * @param minStamp minimum timestamp value, inclusive
-   * @param maxStamp maximum timestamp value, exclusive n
+   * @param maxStamp maximum timestamp value, exclusive
    */
   public Append setTimeRange(long minStamp, long maxStamp) {
     tr = new TimeRange(minStamp, maxStamp);
@@ -69,7 +69,7 @@ public class Append extends Mutation {
   }
 
   /**
-   * Gets the TimeRange used for this append. n
+   * Gets the TimeRange used for this append.
    */
   public TimeRange getTimeRange() {
     return this.tr;
@@ -81,7 +81,7 @@ public class Append extends Mutation {
   }
 
   /**
-   * n * True (default) if the append operation should return the results. A client that is not
+   * True (default) if the append operation should return the results. A client that is not
    * interested in the result can save network bandwidth by setting this to false.
    */
   @Override
@@ -120,7 +120,7 @@ public class Append extends Mutation {
    * Create an Append operation for the specified row.
    * <p>
    * At least one column must be appended to.
-   * @param rowArray Makes a copy out of this buffer. nn
+   * @param rowArray Makes a copy out of this buffer.
    */
   public Append(final byte[] rowArray, final int rowOffset, final int rowLength) {
     checkRow(rowArray, rowOffset, rowLength);
@@ -142,9 +142,9 @@ public class Append extends Mutation {
    * Add the specified column and value to this Append operation.
    * @param family    family name
    * @param qualifier column qualifier
-   * @param value     value to append to specified column n * @deprecated As of release 2.0.0, this
-   *                  will be removed in HBase 3.0.0. Use {@link #addColumn(byte[], byte[], byte[])}
-   *                  instead
+   * @param value     value to append to specified column
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
+   *             {@link #addColumn(byte[], byte[], byte[])} instead
    */
   @Deprecated
   public Append add(byte[] family, byte[] qualifier, byte[] value) {
@@ -155,7 +155,7 @@ public class Append extends Mutation {
    * Add the specified column and value to this Append operation.
    * @param family    family name
    * @param qualifier column qualifier
-   * @param value     value to append to specified column n
+   * @param value     value to append to specified column
    */
   public Append addColumn(byte[] family, byte[] qualifier, byte[] value) {
     KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value);
@@ -163,7 +163,8 @@ public class Append extends Mutation {
   }
 
   /**
-   * Add column and value to this Append operation. n * @return This instance
+   * Add column and value to this Append operation.
+   * @return This instance
    */
   @SuppressWarnings("unchecked")
   public Append add(final Cell cell) {
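
A short usage sketch for the Append methods documented above; the row, family,
qualifier and value are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AppendSketch {
      static void appendSuffix(Table table) throws IOException {
        Append append = new Append(Bytes.toBytes("row1"));
        append.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
        append.setTimeRange(0L, Long.MAX_VALUE); // used as [minStamp, maxStamp)
        append.setReturnResults(false);          // save bandwidth if the result is unused
        table.append(append);
      }
    }
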
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index d1008e1c782..71000a4c0d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -198,7 +198,7 @@ public interface AsyncAdmin {
   CompletableFuture<Void> enableTable(TableName tableName);
 
   /**
-   * Disable a table. The table has to be in enabled state for it to be disabled. n
+   * Disable a table. The table has to be in enabled state for it to be disabled.
    */
   CompletableFuture<Void> disableTable(TableName tableName);
 
@@ -1098,7 +1098,7 @@ public interface AsyncAdmin {
   CompletableFuture<Void> stopMaster();
 
   /**
-   * Stop the designated regionserver. n
+   * Stop the designated regionserver.
    */
   CompletableFuture<Void> stopRegionServer(ServerName serverName);
 
@@ -1126,19 +1126,20 @@ public interface AsyncAdmin {
   CompletableFuture<Void> rollWALWriter(ServerName serverName);
 
   /**
-   * Clear compacting queues on a region server. n * @param queues the set of queue name
+   * Clear compacting queues on a region server.
+   * @param queues the set of queue name
    */
   CompletableFuture<Void> clearCompactionQueues(ServerName serverName, Set<String> queues);
 
   /**
-   * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver. n * @return a
-   * list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
+   * Get a list of {@link RegionMetrics} of all regions hosted on a region server.
+   * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
    */
   CompletableFuture<List<RegionMetrics>> getRegionMetrics(ServerName serverName);
 
   /**
-   * Get a list of {@link RegionMetrics} of all regions hosted on a region seerver for a table. nn
-   * * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
+   * Get a list of {@link RegionMetrics} of all regions hosted on a region server for a table.
+   * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
    */
   CompletableFuture<List<RegionMetrics>> getRegionMetrics(ServerName serverName,
     TableName tableName);
@@ -1285,8 +1286,8 @@ public interface AsyncAdmin {
   CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp);
 
   /**
-   * Turn the cleaner chore on/off. n * @return Previous cleaner state wrapped by a
-   * {@link CompletableFuture}
+   * Turn the cleaner chore on/off.
+   * @return Previous cleaner state wrapped by a {@link CompletableFuture}
    */
   CompletableFuture<Boolean> cleanerChoreSwitch(boolean on);
 
@@ -1305,8 +1306,8 @@ public interface AsyncAdmin {
   CompletableFuture<Boolean> runCleanerChore();
 
   /**
-   * Turn the catalog janitor on/off. n * @return the previous state wrapped by a
-   * {@link CompletableFuture}
+   * Turn the catalog janitor on/off.
+   * @return the previous state wrapped by a {@link CompletableFuture}
    */
   CompletableFuture<Boolean> catalogJanitorSwitch(boolean on);
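
A sketch of consuming the CompletableFuture-based getRegionMetrics documented
above; the helper method is ours, not part of HBase.

    import java.util.List;
    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;

    public class RegionMetricsSketch {
      static void dump(AsyncAdmin admin, ServerName server) {
        // The future completes with one RegionMetrics per region on the server.
        admin.getRegionMetrics(server)
          .thenAccept((List<RegionMetrics> metrics) ->
            metrics.forEach(m -> System.out.println(m.getNameAsString())));
      }
    }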
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 7acaa809d99..2e5d1813aa6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -180,7 +180,7 @@ class AsyncConnectionImpl implements AsyncConnection {
   }
 
   /**
-   * If choreService has not been created yet, create the ChoreService. n
+   * If choreService has not been created yet, create the ChoreService.
    */
   synchronized ChoreService getChoreService() {
     if (isClosed()) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 77d0d985467..65e5611db6b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -203,7 +203,7 @@ class AsyncProcess {
    * The submitted task may be not accomplished at all if there are too many running tasks or other
    * limits.
    * @param <CResult> The class to cast the result
-   * @param task      The setting and data n
+   * @param task      The setting and data
    */
   public <CResult> AsyncRequestFuture submit(AsyncProcessTask<CResult> task)
     throws InterruptedIOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 1cb3b1cfe8d..b5a9fd03130 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -91,7 +91,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
    * @param conf       The {@link Configuration} to use.
    * @param scan       {@link Scan} to use in this scanner
    * @param tableName  The table that we wish to scan
-   * @param connection Connection identifying the cluster n
+   * @param connection Connection identifying the cluster
    */
   public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
     ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index e2b5bdbbc72..30de5370e74 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -55,8 +55,8 @@ public interface ClusterConnection extends Connection {
   /**
    * Use this api to check if the table has been created with the specified number of splitkeys
    * which was used while creating the given table. Note : If this api is used after a table's
-   * region gets splitted, the api may return false. n * tableName n * splitKeys used while creating
-   * table n * if a remote or network exception occurs
+   * region gets split, the api may return false. Takes the tableName and the splitKeys used while
+   * creating the table, and throws if a remote or network exception occurs.
    */
   boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException;
 
@@ -255,7 +255,7 @@ public interface ClusterConnection extends Connection {
    * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. This
    * RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be
    * intercepted with the configured {@link RetryingCallerInterceptor}
-   * @param conf configuration n
+   * @param conf configuration
    */
   RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf);
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
index 4e025eabe66..34cf3f97962 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
@@ -126,7 +126,7 @@ public interface ColumnFamilyDescriptor {
   int getMinVersions();
 
   /**
-   * Get the mob compact partition policy for this family n
+   * Get the mob compact partition policy for this family
    */
   MobCompactPartitionPolicy getMobCompactPartitionPolicy();
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 92a69628d14..e80e12c7c28 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -816,8 +816,8 @@ public class ColumnFamilyDescriptorBuilder {
 
     /**
      * Set whether the tags should be compressed along with DataBlockEncoding. When no
-     * DataBlockEncoding is been used, this is having no effect. n * @return this (for chained
-     * invocation)
+     * DataBlockEncoding is used, this has no effect.
+     * @return this (for chained invocation)
      */
     public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) {
       return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags));
@@ -1195,7 +1195,7 @@ public class ColumnFamilyDescriptorBuilder {
      * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic
      *              prefix
      * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from <code>bytes</code>
-     *         n * @see #toByteArray()
+     * @see #toByteArray()
      */
     private static ColumnFamilyDescriptor parseFrom(final byte[] bytes)
       throws DeserializationException {
@@ -1241,8 +1241,8 @@ public class ColumnFamilyDescriptorBuilder {
     }
 
     /**
-     * Remove a configuration setting represented by the key from the {@link #configuration} map. n
-     * * @return this (for chained invocation)
+     * Remove a configuration setting represented by the key from the {@link #configuration} map.
+     * @return this (for chained invocation)
      */
     public ModifyableColumnFamilyDescriptor removeConfiguration(final String key) {
       return setConfiguration(key, null);
@@ -1254,8 +1254,8 @@ public class ColumnFamilyDescriptorBuilder {
     }
 
     /**
-     * Set the encryption algorithm for use with this family n * @return this (for chained
-     * invocation)
+     * Set the encryption algorithm for use with this family
+     * @return this (for chained invocation)
      */
     public ModifyableColumnFamilyDescriptor setEncryptionType(String algorithm) {
       return setValue(ENCRYPTION_BYTES, algorithm);
@@ -1267,7 +1267,8 @@ public class ColumnFamilyDescriptorBuilder {
     }
 
     /**
-     * Set the raw crypto key attribute for the family n * @return this (for chained invocation)
+     * Set the raw crypto key attribute for the family
+     * @return this (for chained invocation)
      */
     public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) {
       return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 909d045759a..b7597d5eece 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -599,7 +599,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * If choreService has not been created yet, create the ChoreService. n
+   * If choreService has not been created yet, create the ChoreService.
    */
   synchronized ChoreService getChoreService() {
     if (choreService == null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 8dabf0671a0..1f0893d3a33 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -95,7 +95,7 @@ public class Delete extends Mutation {
    * <p>
    * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you
    * must specify each timestamp individually.
-   * @param row We make a local copy of this passed in row. nn
+   * @param row We make a local copy of this passed in row.
    */
   public Delete(final byte[] row, final int rowOffset, final int rowLength) {
     this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@@ -109,8 +109,8 @@ public class Delete extends Mutation {
    * <p>
    * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you
    * must specify each timestamp individually.
-   * @param row We make a local copy of this passed in row. nn * @param timestamp maximum version
-   *            timestamp (only for delete row)
+   * @param row       We make a local copy of this passed in row.
+   * @param timestamp maximum version timestamp (only for delete row)
    */
   public Delete(final byte[] row, final int rowOffset, final int rowLength, long timestamp) {
     checkRow(row, rowOffset, rowLength);
@@ -140,8 +140,9 @@ public class Delete extends Mutation {
   /**
    * Advanced use only. Add an existing delete marker to this Delete object.
    * @param kv An existing KeyValue of type "delete".
-   * @return this for invocation chaining n * @deprecated As of release 2.0.0, this will be removed
-   *         in HBase 3.0.0. Use {@link #add(Cell)} instead
+   * @return this for invocation chaining
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)}
+   *             instead
    */
   @SuppressWarnings("unchecked")
   @Deprecated
@@ -152,7 +153,7 @@ public class Delete extends Mutation {
   /**
    * Add an existing delete marker to this Delete object.
    * @param cell An existing cell of type "delete".
-   * @return this for invocation chaining n
+   * @return this for invocation chaining
    */
   public Delete add(Cell cell) throws IOException {
     super.add(cell);
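
Sketching the timestamp-bounded whole-row Delete that the constructor javadoc
above describes; the row key and cutoff are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteSketch {
      static void deleteRowUpTo(Table table, long ts) throws IOException {
        // The timestamp bounds the row-level delete only; per-family or
        // per-column deletes need their own timestamps, as stressed above.
        Delete delete = new Delete(Bytes.toBytes("row1"), ts);
        table.delete(delete);
      }
    }
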
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 0672faeb73e..3fe7b18799c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -87,7 +87,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Copy-constructor n
+   * Copy-constructor
    */
   public Get(Get get) {
     this(get.getRow());
@@ -126,7 +126,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Create a Get operation for the specified row. nnn
+   * Create a Get operation for the specified row.
    */
   public Get(byte[] row, int rowOffset, int rowLength) {
     Mutation.checkRow(row, rowOffset, rowLength);
@@ -134,7 +134,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Create a Get operation for the specified row. n
+   * Create a Get operation for the specified row.
    */
   public Get(ByteBuffer row) {
     Mutation.checkRow(row);
@@ -208,7 +208,8 @@ public class Get extends Query implements Row {
   /**
    * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp).
    * @param minStamp minimum timestamp value, inclusive
-   * @param maxStamp maximum timestamp value, exclusive n * @return this for invocation chaining
+   * @param maxStamp maximum timestamp value, exclusive
+   * @return this for invocation chaining
    */
   public Get setTimeRange(long minStamp, long maxStamp) throws IOException {
     tr = new TimeRange(minStamp, maxStamp);
@@ -351,7 +352,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Method for retrieving the get's row n
+   * Method for retrieving the get's row
    */
   @Override
   public byte[] getRow() {
@@ -383,7 +384,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Method for retrieving the get's TimeRange n
+   * Method for retrieving the get's TimeRange
    */
   public TimeRange getTimeRange() {
     return this.tr;
@@ -414,7 +415,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Method for retrieving the get's familyMap n
+   * Method for retrieving the get's familyMap
    */
   public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
     return this.familyMap;
@@ -422,7 +423,7 @@ public class Get extends Query implements Row {
 
   /**
    * Compile the table and column family (i.e. schema) information into a String. Useful for parsing
-   * and aggregation by debugging, logging, and administration tools. n
+   * and aggregation by debugging, logging, and administration tools.
    */
   @Override
   public Map<String, Object> getFingerprint() {
@@ -439,7 +440,7 @@ public class Get extends Query implements Row {
    * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
    * Map along with the fingerprinted information. Useful for debugging, logging, and administration
    * tools.
-   * @param maxCols a limit on the number of columns output prior to truncation n
+   * @param maxCols a limit on the number of columns output prior to truncation
    */
   @Override
   public Map<String, Object> toMap(int maxCols) {
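
A small sketch of the [minStamp, maxStamp) semantics of Get.setTimeRange from
the hunks above; the names are illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetSketch {
      static Result readLastMinute(Table table, long now) throws IOException {
        Get get = new Get(Bytes.toBytes("row1"));
        get.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
        get.setTimeRange(now - 60_000L, now); // minStamp inclusive, maxStamp exclusive
        return table.get(get);
      }
    }
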
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 915367127fc..2bb10385cfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1124,7 +1124,7 @@ public class HBaseAdmin implements Admin {
   }
 
   /**
-   * n * @return List of {@link HRegionInfo}.
+   * @return List of {@link HRegionInfo}.
    * @throws IOException if a remote or network exception occurs
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link #getRegions(ServerName)}.
@@ -1348,7 +1348,7 @@ public class HBaseAdmin implements Admin {
    * @param regionName   region to compact
    * @param columnFamily column family within a table or region
    * @param major        True if we are to do a major compaction.
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   private void compactRegion(final byte[] regionName, final byte[] columnFamily,
     final boolean major) throws IOException {
@@ -2309,7 +2309,7 @@ public class HBaseAdmin implements Admin {
   }
 
   /**
-   * n * @return List of {@link HRegionInfo}.
+   * @return List of {@link HRegionInfo}.
    * @throws IOException if a remote or network exception occurs
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link #getRegions(TableName)}.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 0ba1bf8a8f6..5bf8dbe104a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -134,8 +134,8 @@ public class HTableMultiplexer {
 
   /**
    * The put request will be buffered by its corresponding buffer queue. Return false if the queue
-   * is already full. nn * @return true if the request can be accepted by its corresponding buffer
-   * queue.
+   * is already full.
+   * @return true if the request can be accepted by its corresponding buffer queue.
    */
   public boolean put(TableName tableName, final Put put) {
     return put(tableName, put, this.maxAttempts);
@@ -143,7 +143,8 @@ public class HTableMultiplexer {
 
   /**
    * The puts request will be buffered by their corresponding buffer queue. Return the list of puts
-   * which could not be queued. nn * @return the list of puts which could not be queued
+   * which could not be queued.
+   * @return the list of puts which could not be queued
    */
   public List<Put> put(TableName tableName, final List<Put> puts) {
     if (puts == null) return null;
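
HTableMultiplexer is itself deprecated in 2.x, but for the put() javadoc above,
here is a sketch of handling the "could not be queued" return value; the retry
backoff is an assumption, not an HBase policy.

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HTableMultiplexer;
    import org.apache.hadoop.hbase.client.Put;

    public class MultiplexerSketch {
      static void resubmit(HTableMultiplexer mux, TableName table, List<Put> puts)
        throws InterruptedException {
        // put() returns the puts that did not fit in the buffer queues.
        List<Put> overflow = mux.put(table, puts);
        while (overflow != null && !overflow.isEmpty()) {
          Thread.sleep(100); // crude backoff before retrying the leftovers
          overflow = mux.put(table, overflow);
        }
      }
    }
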
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
index 5f758c7a5e1..6564601b22c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class ImmutableHColumnDescriptor extends HColumnDescriptor {
   /*
-   * Create an unmodifyable copy of an HColumnDescriptor n
+   * Create an unmodifiable copy of an HColumnDescriptor
    */
   ImmutableHColumnDescriptor(final HColumnDescriptor desc) {
     super(desc, false);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java
index f95dbeb001e..952ac4f7719 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 public class ImmutableHRegionInfo extends HRegionInfo {
 
   /*
-   * Creates an immutable copy of an HRegionInfo. n
+   * Creates an immutable copy of an HRegionInfo.
    */
   public ImmutableHRegionInfo(RegionInfo other) {
     super(other);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
index 2cce334e359..9200c85daa1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
@@ -41,7 +41,7 @@ public class ImmutableHTableDescriptor extends HTableDescriptor {
   }
 
   /*
-   * Create an unmodifyable copy of an HTableDescriptor n
+   * Create an unmodifiable copy of an HTableDescriptor
    */
   public ImmutableHTableDescriptor(final HTableDescriptor desc) {
     super(desc, false);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
index 0efd71e69d2..49361cec743 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
@@ -92,7 +92,8 @@ public class Increment extends Mutation {
 
   /**
    * Add the specified KeyValue to this operation.
-   * @param cell individual Cell n * @throws java.io.IOException e
+   * @param cell individual Cell
+   * @throws java.io.IOException e
    */
   public Increment add(Cell cell) throws IOException {
     super.add(cell);
@@ -120,7 +121,7 @@ public class Increment extends Mutation {
   }
 
   /**
-   * Gets the TimeRange used for this increment. n
+   * Gets the TimeRange used for this increment.
    */
   public TimeRange getTimeRange() {
     return this.tr;
@@ -138,7 +139,7 @@ public class Increment extends Mutation {
    * This range is used as [minStamp, maxStamp).
    * @param minStamp minimum timestamp value, inclusive
    * @param maxStamp maximum timestamp value, exclusive
-   * @throws IOException if invalid time range n
+   * @throws IOException if invalid time range
    */
   public Increment setTimeRange(long minStamp, long maxStamp) throws IOException {
     tr = new TimeRange(minStamp, maxStamp);
@@ -208,8 +209,7 @@ public class Increment extends Mutation {
   }
 
   /**
-   * n
-   */
+   */
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
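
A sketch of an Increment using the addColumn and setTimeRange methods documented
above; the row and column names are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementSketch {
      static Result bump(Table table) throws IOException {
        Increment inc = new Increment(Bytes.toBytes("row1"));
        inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("hits"), 1L); // add 1
        inc.setTimeRange(0L, Long.MAX_VALUE); // used as [minStamp, maxStamp)
        return table.increment(inc);
      }
    }
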
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
index 7a919f5ad5f..e7c600ee50b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
@@ -107,7 +107,7 @@ abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
    * configured to make this rpc call, use getRpcController(). We are trying to contain
    * rpcController references so we don't pollute codebase with protobuf references; keep the
    * protobuf references contained and only present in a few classes rather than all about the code
-   * base. n
+   * base.
    */
   protected abstract V rpcCall() throws Exception;
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
index dd19588d307..e0b3240b023 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
@@ -186,9 +186,7 @@ public class MetaCache {
     }
   }
 
-  /**
-   * n * @return Map of cached locations for passed <code>tableName</code>
-   */
+  /** Returns Map of cached locations for passed <code>tableName</code> */
   private ConcurrentNavigableMap<byte[], RegionLocations>
     getTableLocations(final TableName tableName) {
     // find the map of cached locations for this table
@@ -287,7 +285,7 @@ public class MetaCache {
 
   /**
    * Delete a cached location, no matter what it is. Called when we were told to not use cache.
-   * @param tableName tableName n
+   * @param tableName tableName
    */
   public void clearCache(final TableName tableName, final byte[] row) {
     ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
index cc3db106c4e..f224d1fc66f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
@@ -60,7 +60,7 @@ public final class MultiAction {
   /**
    * Add an Action to this container based on its regionName. If the regionName is wrong, the
    * initial execution will fail, but will be automatically retried after looking up the correct
-   * region. nn
+   * region.
    */
   public void add(byte[] regionName, Action a) {
     add(regionName, Collections.singletonList(a));
@@ -69,7 +69,8 @@ public final class MultiAction {
   /**
    * Add an Action to this container based on its regionName. If the regionName is wrong, the
    * initial execution will fail, but will be automatically retried after looking up the correct
-   * region. n * @param actionList list of actions to add for the region
+   * region.
+   * @param actionList list of actions to add for the region
    */
   public void add(byte[] regionName, List<Action> actionList) {
     List<Action> rsActions = actions.get(regionName);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
index b768b76fdb6..37f5cbe695d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
@@ -54,9 +54,9 @@ public class MultiResponse extends AbstractResponse {
   }
 
   /**
-   * Add the pair to the container, grouped by the regionName n * @param originalIndex the original
-   * index of the Action (request).
-   * @param resOrEx the result or error; will be empty for successful Put and Delete actions.
+   * Add the pair to the container, grouped by the regionName
+   * @param originalIndex the original index of the Action (request).
+   * @param resOrEx       the result or error; will be empty for successful Put and Delete actions.
    */
   public void add(byte[] regionName, int originalIndex, Object resOrEx) {
     getResult(regionName).addResult(originalIndex, resOrEx);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index 7627f8dc0e5..a9382f3a9be 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -179,7 +179,7 @@ class MutableRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get current table name of the region n
+   * Get current table name of the region
    */
   @Override
   public TableName getTable() {
@@ -230,7 +230,8 @@ class MutableRegionInfo implements RegionInfo {
   }
 
   /**
-   * @param split set split status n
+   * Change the split status flag.
+   * @param split set split status
    */
   public MutableRegionInfo setSplit(boolean split) {
     this.split = split;
@@ -251,7 +252,7 @@ class MutableRegionInfo implements RegionInfo {
   /**
    * The parent of a region split is offline while split daughters hold references to the parent.
    * Offlined regions are closed.
-   * @param offLine Set online/offline status. n
+   * @param offLine Set online/offline status.
    */
   public MutableRegionInfo setOffline(boolean offLine) {
     this.offLine = offLine;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 408b02e44fa..16faf8576f1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -161,8 +161,8 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Create a KeyValue with this objects row key and the Put identifier. nnnn * @param tags -
-   * Specify the Tags as an Array
+   * Create a KeyValue with this object's row key and the Put identifier.
+   * @param tags - Specify the Tags as an Array
    * @return a KeyValue with this object's row key and the Put identifier.
    */
   KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tags) {
@@ -183,7 +183,7 @@ public abstract class Mutation extends OperationWithAttributes
 
   /**
    * Compile the column family (i.e. schema) information into a Map. Useful for parsing and
-   * aggregation by debugging, logging, and administration tools. n
+   * aggregation by debugging, logging, and administration tools.
    */
   @Override
   public Map<String, Object> getFingerprint() {
@@ -202,7 +202,7 @@ public abstract class Mutation extends OperationWithAttributes
    * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
    * Map along with the fingerprinted information. Useful for debugging, logging, and administration
    * tools.
-   * @param maxCols a limit on the number of columns output prior to truncation n
+   * @param maxCols a limit on the number of columns output prior to truncation
    */
   @Override
   public Map<String, Object> toMap(int maxCols) {
@@ -265,7 +265,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Set the durability for this mutation n
+   * Set the durability for this mutation
    */
   public Mutation setDurability(Durability d) {
     this.durability = d;
@@ -278,7 +278,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Method for retrieving the put's familyMap n
+   * Method for retrieving the put's familyMap
    */
   public NavigableMap<byte[], List<Cell>> getFamilyCellMap() {
     return this.familyMap;
@@ -306,7 +306,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Method for retrieving the delete's row n
+   * Method for retrieving the delete's row
    */
   @Override
   public byte[] getRow() {
@@ -324,8 +324,9 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Method for retrieving the timestamp n * @deprecated As of release 2.0.0, this will be removed
-   * in HBase 3.0.0. Use {@link #getTimestamp()} instead
+   * Method for retrieving the timestamp
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
+   *             {@link #getTimestamp()} instead
    */
   @Deprecated
   public long getTimeStamp() {
@@ -333,7 +334,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Method for retrieving the timestamp. n
+   * Method for retrieving the timestamp.
    */
   public long getTimestamp() {
     return this.ts;
@@ -369,7 +370,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Sets the visibility expression associated with cells in this Mutation. n
+   * Sets the visibility expression associated with cells in this Mutation.
    */
   public Mutation setCellVisibility(CellVisibility expression) {
     this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@@ -385,8 +386,8 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
-   * protocol buffer CellVisibility
+   * Create a protocol buffer CellVisibility based on a client CellVisibility.
+   * @return a protocol buffer CellVisibility
    */
   static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
     ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@@ -395,8 +396,8 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
-   * client CellVisibility
+   * Convert a protocol buffer CellVisibility to a client CellVisibility
+   * @return the converted client CellVisibility
    */
   private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
     if (proto == null) return null;
@@ -404,8 +405,8 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
-   * converted client CellVisibility n
+   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+   * @return the converted client CellVisibility
    */
   private static CellVisibility toCellVisibility(byte[] protoBytes)
     throws DeserializationException {
@@ -510,7 +511,7 @@ public abstract class Mutation extends OperationWithAttributes
 
   /**
    * Set the TTL desired for the result of the mutation, in milliseconds.
-   * @param ttl the TTL desired for the result of the mutation, in milliseconds n
+   * @param ttl the TTL desired for the result of the mutation, in milliseconds
    */
   public Mutation setTTL(long ttl) {
     setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl));
@@ -626,8 +627,8 @@ public abstract class Mutation extends OperationWithAttributes
 
   /*
    * Private method to determine if this object's familyMap contains the given value assigned to the
-   * given family, qualifier and timestamp respecting the 2 boolean arguments nnnnnn * @return
-   * returns true if the given family, qualifier timestamp and value already has an existing
+   * given family, qualifier and timestamp respecting the 2 boolean arguments
+   * @return true if the given family, qualifier, timestamp and value already have an existing
    * KeyValue object in the family map.
    */
   protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS,
@@ -689,8 +690,9 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * @param row Row to check nn * @throws IllegalArgumentException Thrown if <code>row</code> is
-   *            empty or null or &gt; {@link HConstants#MAX_ROW_LENGTH}
+   * @param row Row to check
+   * @throws IllegalArgumentException Thrown if <code>row</code> is empty or null or &gt;
+   *                                  {@link HConstants#MAX_ROW_LENGTH}
    * @return <code>row</code>
    */
   static byte[] checkRow(final byte[] row, final int offset, final int length) {
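
As an aside for readers of the cleaned-up Mutation javadocs above, a minimal sketch of how the
documented setters combine on a Put (Put extends Mutation). The row, column and TTL values are
illustrative, and an open Table named "table" is assumed; none of this is part of the patch:

    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.security.visibility.CellVisibility;
    import org.apache.hadoop.hbase.util.Bytes;

    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Durability controls how the WAL is written for this mutation.
    put.setDurability(Durability.SYNC_WAL);
    // TTL for the result of the mutation, in milliseconds (one day here).
    put.setTTL(24L * 60 * 60 * 1000);
    // Visibility expression applied to all cells in this mutation.
    put.setCellVisibility(new CellVisibility("secret|admin"));
    table.put(put);
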
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
index a517f0bb43a..2cad5ef7325 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
@@ -94,7 +94,7 @@ public abstract class Operation {
   /**
    * Produces a string representation of this Operation. It defaults to a JSON representation, but
    * falls back to a string representation of the fingerprint and details in the case of a JSON
-   * encoding failure. n
+   * encoding failure.
    */
   @Override
   public String toString() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
index e34c9d6eacb..33c1d853e1a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
@@ -106,7 +106,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri
    * This method allows you to set an identifier on an operation. The original motivation for this
    * was to allow the identifier to be used in slow query logging, but this could obviously be
    * useful in other places. One use of this could be to put a class.method identifier in here to
-   * see where the slow query is coming from. n * id to set for the scan
+   * see where the slow query is coming from. The argument is the id to set for the scan.
    */
   public OperationWithAttributes setId(String id) {
     setAttribute(ID_ATRIBUTE, Bytes.toBytes(id));
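
The setId javadoc above suggests a class.method identifier; a one-line sketch (the identifier
string is illustrative, not from this patch):

    Scan scan = new Scan();
    // The id surfaces in slow query logging to show where the scan originated.
    scan.setId("ReportJob.scanOrders");
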
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index 46770fc4e74..b272aa56edd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
@@ -131,7 +131,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
   /**
    * Handles failures encountered when communicating with a server. Updates the FailureInfo in
    * repeatedFailuresMap to reflect the failure. Throws RepeatedConnectException if the client is in
-   * Fast fail mode. nn * - the throwable to be handled. n
+   * Fast fail mode. Here t is the throwable to be handled.
    */
   protected void handleFailureToServer(ServerName serverName, Throwable t) {
     if (serverName == null || t == null) {
@@ -200,7 +200,8 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
   /**
    * Checks to see if we are in the Fast fail mode for requests to the server. If a client is unable
    * to contact a server for more than fastFailThresholdMilliSec the client will get into fast fail
-   * mode. n * @return true if the client is in fast fail mode for the server.
+   * mode.
+   * @return true if the client is in fast fail mode for the server.
    */
   private boolean inFastFailMode(ServerName server) {
     FailureInfo fInfo = repeatedFailuresMap.get(server);
@@ -224,7 +225,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
   * Check to see if the client should try to connect to the server, in spite of knowing that it is
    * in the fast fail mode. The idea here is that we want just one client thread to be actively
    * trying to reconnect, while all the other threads trying to reach the server will short circuit.
-   * n * @return true if the client should try to connect to the server.
+   * @return true if the client should try to connect to the server.
    */
   protected boolean shouldRetryInspiteOfFastFail(FailureInfo fInfo) {
     // We believe that the server is down, But, we want to have just one
@@ -245,7 +246,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
   }
 
   /**
-   * This function updates the Failure info for a particular server after the attempt to nnnn
+   * This function updates the Failure info for a particular server after an attempt to contact it.
    */
   private void updateFailureInfoForServer(ServerName server, FailureInfo fInfo, boolean didTry,
     boolean couldNotCommunicate, boolean retryDespiteFastFailMode) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index a3502f29bd0..e01d2085d4d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -60,7 +60,7 @@ public class Put extends Mutation implements HeapSize {
   }
 
   /**
-   * We make a copy of the passed in row key to keep local. nnn
+   * We make a copy of the passed in row key to keep local.
    */
   public Put(byte[] rowArray, int rowOffset, int rowLength) {
     this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@@ -88,7 +88,7 @@ public class Put extends Mutation implements HeapSize {
   }
 
   /**
-   * We make a copy of the passed in row key to keep local. nnnn
+   * We make a copy of the passed in row key to keep local.
    */
   public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) {
     checkRow(rowArray, rowOffset, rowLength);
@@ -155,7 +155,7 @@ public class Put extends Mutation implements HeapSize {
    * Add the specified column and value to this Put operation.
    * @param family    family name
    * @param qualifier column qualifier
-   * @param value     column value n
+   * @param value     column value
    */
   public Put addColumn(byte[] family, byte[] qualifier, byte[] value) {
     return addColumn(family, qualifier, this.ts, value);
@@ -178,7 +178,7 @@ public class Put extends Mutation implements HeapSize {
    * @param family    family name
    * @param qualifier column qualifier
    * @param ts        version timestamp
-   * @param value     column value n
+   * @param value     column value
    */
   public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) {
     if (ts < 0) {
@@ -222,7 +222,7 @@ public class Put extends Mutation implements HeapSize {
    * @param family    family name
    * @param qualifier column qualifier
    * @param ts        version timestamp
-   * @param value     column value n
+   * @param value     column value
    */
   public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
     if (ts < 0) {
@@ -255,7 +255,8 @@ public class Put extends Mutation implements HeapSize {
   /**
    * Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is
    * immutable and its backing array will not be modified for the duration of this Put.
-   * @param cell individual cell n * @throws java.io.IOException e
+   * @param cell individual cell
+   * @throws java.io.IOException e
    */
   public Put add(Cell cell) throws IOException {
     super.add(cell);
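
A sketch of the Put overloads whose javadocs are fixed above, again assuming an open Table named
"table" (row and column names are illustrative):

    Put put = new Put(Bytes.toBytes("row-1"));
    // The Put's timestamp is used when none is given.
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    // An explicit version timestamp; negative values make addColumn throw IllegalArgumentException.
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 1696600000000L, Bytes.toBytes("v2"));
    table.put(put);
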
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index d752693dabc..97f687857f0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -47,9 +47,6 @@ public abstract class Query extends OperationWithAttributes {
   protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
   protected Boolean loadColumnFamiliesOnDemand = null;
 
-  /**
-   * n
-   */
   public Filter getFilter() {
     return filter;
   }
@@ -67,7 +64,7 @@ public abstract class Query extends OperationWithAttributes {
   }
 
   /**
-   * Sets the authorizations to be used by this Query n
+   * Sets the authorizations to be used by this Query
    */
   public Query setAuthorizations(Authorizations authorizations) {
     this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@@ -131,7 +128,7 @@ public abstract class Query extends OperationWithAttributes {
    * Specify region replica id where Query will fetch data from. Use this together with
    * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a
    * specific replicaId. <br>
-   * <b> Expert: </b>This is an advanced API exposed. Only use it if you know what you are doing n
+   * <b> Expert: </b>This is an advanced API exposed. Only use it if you know what you are doing
    */
   public Query setReplicaId(int Id) {
     this.targetReplicaId = Id;
@@ -208,7 +205,7 @@ public abstract class Query extends OperationWithAttributes {
    * Column Family time ranges take precedence over the global time range.
    * @param cf       the column family for which you want to restrict
    * @param minStamp minimum timestamp value, inclusive
-   * @param maxStamp maximum timestamp value, exclusive n
+   * @param maxStamp maximum timestamp value, exclusive
    */
 
   public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
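
For the setColumnFamilyTimeRange javadoc above, a short sketch (the family name and timestamps
are illustrative):

    Scan scan = new Scan();
    // Restrict only column family "cf" to [1000, 2000); other families use the global time range.
    scan.setColumnFamilyTimeRange(Bytes.toBytes("cf"), 1000L, 2000L);
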
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 8b2ed345306..fea19680034 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -184,7 +184,7 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<
    * Run RPC call.
    * @param rpcController PayloadCarryingRpcController is a mouthful but it at a minimum is a facade
    *                      on protobuf so we don't have to put protobuf everywhere; we can keep it
-   *                      behind this class. n
+   *                      behind this class.
    */
   protected abstract T call(HBaseRpcController rpcController) throws Exception;
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
index 58163a2d74a..3f353b5799d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
@@ -59,7 +59,8 @@ public class RegionInfoDisplay {
   }
 
   /**
-   * Get the start key for display. Optionally hide the real start key. nn * @return the startkey
+   * Get the start key for display. Optionally hide the real start key.
+   * @return the startkey
    */
   public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) {
     boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
@@ -68,15 +69,16 @@ public class RegionInfoDisplay {
   }
 
   /**
-   * Get the region name for display. Optionally hide the start key. nn * @return region name as
-   * String
+   * Get the region name for display. Optionally hide the start key.
+   * @return region name as String
    */
   public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuration conf) {
     return Bytes.toStringBinary(getRegionNameForDisplay(ri, conf));
   }
 
   /**
-   * Get the region name for display. Optionally hide the start key. nn * @return region name bytes
+   * Get the region name for display. Optionally hide the start key.
+   * @return region name bytes
    */
   public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) {
     boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
index 4475a01c0f9..5cae32fe241 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
@@ -51,7 +51,8 @@ public class RegionReplicaUtil {
   /**
    * Returns the RegionInfo for the given replicaId. RegionInfo's correspond to a range of a table,
    * but more than one "instance" of the same range can be deployed which are differentiated by the
-   * replicaId. n * @param replicaId the replicaId to use
+   * replicaId.
+   * @param replicaId the replicaId to use
    * @return an RegionInfo object corresponding to the same range (table, start and end key), but
    *         for the given replicaId.
    */
@@ -84,7 +85,7 @@ public class RegionReplicaUtil {
   }
 
   /**
-   * Removes the non-default replicas from the passed regions collection n
+   * Removes the non-default replicas from the passed regions collection
    */
   public static void removeNonDefaultRegions(Collection<RegionInfo> regions) {
     Iterator<RegionInfo> iterator = regions.iterator();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index abe4058b99d..a97c6da80ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -132,7 +132,7 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
    * configured to make this rpc call, use getRpcController(). We are trying to contain
    * rpcController references so we don't pollute codebase with protobuf references; keep the
    * protobuf references contained and only present in a few classes rather than all about the code
-   * base. n
+   * base.
    */
   protected abstract T rpcCall() throws Exception;
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index 2ae13c6ce1c..51821fc7429 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -186,7 +186,7 @@ public class Result implements CellScannable, CellScanner {
 
   /**
    * Method for retrieving the row key that corresponds to the row from which this Result was
-   * created. n
+   * created.
    */
   public byte[] getRow() {
     if (this.row == null) {
@@ -227,8 +227,9 @@ public class Result implements CellScannable, CellScanner {
    * or Get) only requested 1 version the list will contain at most 1 entry. If the column did not
    * exist in the result set (either the column does not exist or the column was not selected in the
    * query) the list will be empty. Also see getColumnLatest which returns just a Cell
-   * @param family the family n * @return a list of Cells for this column or empty list if the
-   *               column did not exist in the result set
+   * @param family the family
+   * @return a list of Cells for this column or empty list if the column did not exist in the result
+   *         set
    */
   public List<Cell> getColumnCells(byte[] family, byte[] qualifier) {
     List<Cell> result = new ArrayList<>();
@@ -324,7 +325,7 @@ public class Result implements CellScannable, CellScanner {
   }
 
   /**
-   * The Cell for the most recent timestamp for a given column. nn *
+   * The Cell for the most recent timestamp for a given column.
    * @return the Cell for the column, or null if no value exists in the row or none have been
    *         selected in the query (Get/Scan)
    */
@@ -677,8 +678,7 @@ public class Result implements CellScannable, CellScanner {
   }
 
   /**
-   * n
-   */
+   */
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
@@ -800,7 +800,8 @@ public class Result implements CellScannable, CellScanner {
   }
 
   /**
-   * Get total size of raw cells n * @return Total size.
+   * Get total size of raw cells
+   * @return Total size.
    */
   public static long getTotalSizeOfCells(Result result) {
     long size = 0;
@@ -816,7 +817,7 @@ public class Result implements CellScannable, CellScanner {
   /**
    * Copy another Result into this one. Needed for the old Mapred framework
    * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT (which is supposed
-   *                                       to be immutable). n
+   *                                       to be immutable).
    */
   public void copyFrom(Result other) {
     checkReadonly();
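
A sketch of reading a Result through the accessors documented above ("result" is assumed to come
from a Get or Scan; family and qualifier names are illustrative):

    byte[] row = result.getRow();
    // Empty list if the column is absent or was not selected in the query.
    for (Cell cell : result.getColumnCells(Bytes.toBytes("cf"), Bytes.toBytes("q"))) {
      long ts = cell.getTimestamp();
      byte[] value = CellUtil.cloneValue(cell);
    }
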
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
index 74ff6de6f93..ebb27ceff75 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
@@ -86,7 +86,7 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
    * setting (or hbase.client.scanner.caching in hbase-site.xml).
    * @param nbRows number of rows to return
    * @return Between zero and nbRows rowResults. Scan is done if returned array is of zero-length
-   *         (We never return null). n
+   *         (We never return null).
    */
   default Result[] next(int nbRows) throws IOException {
     List<Result> resultSets = new ArrayList<>(nbRows);
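
The default next(int) above drains up to nbRows per call; a typical consumption loop (assuming
an open Table named "table"):

    try (ResultScanner scanner = table.getScanner(new Scan())) {
      Result[] batch;
      // A zero-length array signals the end of the scan; null is never returned.
      while ((batch = scanner.next(100)).length > 0) {
        for (Result r : batch) {
          // process r
        }
      }
    }
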
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java
index cb3b2fd3cd6..719b6b2aae7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java
@@ -46,20 +46,20 @@ abstract class RetryingCallerInterceptor {
   public abstract RetryingCallerInterceptorContext createEmptyContext();
 
   /**
-   * Call this function in case we caught a failure during retries. n * : The context object that we
-   * obtained previously. n * : The exception that we caught in this particular try n
+   * Call this function in case we caught a failure during retries. The context is the object that
+   * we obtained previously and t is the exception that we caught in this particular try.
    */
   public abstract void handleFailure(RetryingCallerInterceptorContext context, Throwable t)
     throws IOException;
 
   /**
-   * Call this function alongside the actual call done on the callable. nn
+   * Call this function alongside the actual call done on the callable.
    */
   public abstract void intercept(
     RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) throws IOException;
 
   /**
-   * Call this function to update at the end of the retry. This is not necessary to happen. n
+   * Call this function to update at the end of the retry. This is not necessary to happen.
    */
   public abstract void updateFailureInfo(RetryingCallerInterceptorContext context);
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java
index b810de46c44..177777624d6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java
@@ -38,17 +38,17 @@ abstract class RetryingCallerInterceptorContext {
 
   /**
    * This prepares the context object by populating it with information specific to the
-   * implementation of the {@link RetryingCallerInterceptor} along with which this will be used. n *
-   * : The {@link RetryingCallable} that contains the information about the call that is being made.
+   * implementation of the {@link RetryingCallerInterceptor} along with which this will be used. The
+   * {@link RetryingCallable} contains the information about the call that is being made.
    * @return A new {@link RetryingCallerInterceptorContext} object that can be used for use in the
    *         current retrying call
    */
   public abstract RetryingCallerInterceptorContext prepare(RetryingCallable<?> callable);
 
   /**
-   * Telescopic extension that takes which of the many retries we are currently in. n * : The
-   * {@link RetryingCallable} that contains the information about the call that is being made. n * :
-   * The retry number that we are currently in.
+   * Telescopic extension that takes which of the many retries we are currently in. The
+   * {@link RetryingCallable} contains the information about the call that is being made, and tries
+   * is the retry number that we are currently in.
    * @return A new context object that can be used for use in the current retrying call
    */
   public abstract RetryingCallerInterceptorContext prepare(RetryingCallable<?> callable, int tries);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
index 1d9b94a54cc..2a3c7530fb7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
@@ -39,8 +39,8 @@ public class RowMutations implements Row {
 
   /**
    * Create a {@link RowMutations} with the specified mutations.
-   * @param mutations the mutations to send n * @throws IOException if any row in mutations is
-   *                  different to another
+   * @param mutations the mutations to send
+   * @throws IOException if any row in mutations is different to another
    */
   public static RowMutations of(List<? extends Mutation> mutations) throws IOException {
     if (CollectionUtils.isEmpty(mutations)) {
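
A sketch for RowMutations.of, whose javadoc is fixed above: all mutations must target the same
row or an IOException is thrown ("table" and the column names are illustrative):

    byte[] row = Bytes.toBytes("row-1");
    Put put = new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v"));
    Delete delete = new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2"));
    // Atomically applies both mutations to the row.
    table.mutateRow(RowMutations.of(Arrays.asList(put, delete)));
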
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 2327142699d..7c964de74e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -326,7 +326,7 @@ public class Scan extends Query {
    * Get all columns from the specified family.
    * <p>
    * Overrides previous calls to addColumn for this family.
-   * @param family family name n
+   * @param family family name
    */
   public Scan addFamily(byte[] family) {
     familyMap.remove(family);
@@ -339,7 +339,7 @@ public class Scan extends Query {
    * <p>
    * Overrides previous calls to addFamily for this family.
    * @param family    family name
-   * @param qualifier column qualifier n
+   * @param qualifier column qualifier
    */
   public Scan addColumn(byte[] family, byte[] qualifier) {
     NavigableSet<byte[]> set = familyMap.get(family);
@@ -361,7 +361,7 @@ public class Scan extends Query {
    * @param minStamp minimum timestamp value, inclusive
    * @param maxStamp maximum timestamp value, exclusive
    * @see #setMaxVersions()
-   * @see #setMaxVersions(int) n
+   * @see #setMaxVersions(int)
    */
   public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
     tr = new TimeRange(minStamp, maxStamp);
@@ -374,8 +374,9 @@ public class Scan extends Query {
   * number of versions beyond the default.
    * @param timestamp version timestamp
    * @see #setMaxVersions()
-   * @see #setMaxVersions(int) n * @deprecated As of release 2.0.0, this will be removed in HBase
-   *      3.0.0. Use {@link #setTimestamp(long)} instead
+   * @see #setMaxVersions(int)
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
+   *             {@link #setTimestamp(long)} instead
    */
   @Deprecated
   public Scan setTimeStamp(long timestamp) throws IOException {
@@ -388,7 +389,7 @@ public class Scan extends Query {
   * number of versions beyond the default.
    * @param timestamp version timestamp
    * @see #setMaxVersions()
-   * @see #setMaxVersions(int) n
+   * @see #setMaxVersions(int)
    */
   public Scan setTimestamp(long timestamp) {
     try {
@@ -412,9 +413,9 @@ public class Scan extends Query {
    * <p>
    * If the specified row does not exist, the Scanner will start from the next closest row after the
    * specified row.
-   * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if
-   *                 startRow does not meet criteria for a row key (when length exceeds
-   *                 {@link HConstants#MAX_ROW_LENGTH})
+   * @param startRow row to start scanner at or after
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStartRow(byte[])}
    *             instead. This method may change the inclusive of the stop row to keep compatible
    *             with the old behavior.
@@ -436,9 +437,9 @@ public class Scan extends Query {
    * <p>
    * If the specified row does not exist, the Scanner will start from the next closest row after the
    * specified row.
-   * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if
-   *                 startRow does not meet criteria for a row key (when length exceeds
-   *                 {@link HConstants#MAX_ROW_LENGTH})
+   * @param startRow row to start scanner at or after
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
   public Scan withStartRow(byte[] startRow) {
     return withStartRow(startRow, true);
@@ -450,9 +451,9 @@ public class Scan extends Query {
    * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
    * will start from the next closest row after the specified row.
    * @param startRow  row to start scanner at or after
-   * @param inclusive whether we should include the start row when scan n * @throws
-   *                  IllegalArgumentException if startRow does not meet criteria for a row key
-   *                  (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
+   * @param inclusive whether we should include the start row when scan
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
   public Scan withStartRow(byte[] startRow, boolean inclusive) {
     if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
@@ -472,9 +473,9 @@ public class Scan extends Query {
    * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
    * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
    * </p>
-   * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does
-   *                not meet criteria for a row key (when length exceeds
-   *                {@link HConstants#MAX_ROW_LENGTH})
+   * @param stopRow row to end at (exclusive)
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStopRow(byte[])} instead.
    *             This method may change the inclusive of the stop row to keep compatible with the
    *             old behavior.
@@ -499,9 +500,9 @@ public class Scan extends Query {
    * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
    * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
    * </p>
-   * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does
-   *                not meet criteria for a row key (when length exceeds
-   *                {@link HConstants#MAX_ROW_LENGTH})
+   * @param stopRow row to end at (exclusive)
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
   public Scan withStopRow(byte[] stopRow) {
     return withStopRow(stopRow, false);
@@ -513,9 +514,9 @@ public class Scan extends Query {
    * The scan will include rows that are lexicographically less than (or equal to if
    * {@code inclusive} is {@code true}) the provided stopRow.
    * @param stopRow   row to end at
-   * @param inclusive whether we should include the stop row when scan n * @throws
-   *                  IllegalArgumentException if stopRow does not meet criteria for a row key (when
-   *                  length exceeds {@link HConstants#MAX_ROW_LENGTH})
+   * @param inclusive whether we should include the stop row when scan
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
   public Scan withStopRow(byte[] stopRow, boolean inclusive) {
     if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
@@ -543,7 +544,7 @@ public class Scan extends Query {
    * <b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])} after this
    * method will yield undefined results.</b>
    * </p>
-   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) n
+   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
    */
   public Scan setRowPrefixFilter(byte[] rowPrefix) {
     if (rowPrefix == null) {
@@ -557,9 +558,9 @@ public class Scan extends Query {
   }
 
   /**
-   * Get all available versions. n * @deprecated since 2.0.0 and will be removed in 3.0.0. It is
-   * easy to misunderstand with column family's max versions, so use {@link #readAllVersions()}
-   * instead.
+   * Get all available versions.
+   * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to misunderstand with column
+   *             family's max versions, so use {@link #readAllVersions()} instead.
    * @see #readAllVersions()
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
    */
@@ -570,9 +571,9 @@ public class Scan extends Query {
 
   /**
    * Get up to the specified number of versions of each column.
-   * @param maxVersions maximum versions for each column n * @deprecated since 2.0.0 and will be
-   *                    removed in 3.0.0. It is easy to misunderstand with column family's max
-   *                    versions, so use {@link #readVersions(int)} instead.
+   * @param maxVersions maximum versions for each column
+   * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to misunderstand with column
+   *             family's max versions, so use {@link #readVersions(int)} instead.
    * @see #readVersions(int)
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
    */
@@ -582,7 +583,7 @@ public class Scan extends Query {
   }
 
   /**
-   * Get all available versions. n
+   * Get all available versions.
    */
   public Scan readAllVersions() {
     this.maxVersions = Integer.MAX_VALUE;
@@ -591,7 +592,7 @@ public class Scan extends Query {
 
   /**
    * Get up to the specified number of versions of each column.
-   * @param versions specified number of versions for each column n
+   * @param versions specified number of versions for each column
    */
   public Scan readVersions(int versions) {
     this.maxVersions = versions;
@@ -669,7 +670,7 @@ public class Scan extends Query {
 
   /**
    * Setting the familyMap
-   * @param familyMap map of family to qualifier n
+   * @param familyMap map of family to qualifier
    */
   public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
     this.familyMap = familyMap;
@@ -677,7 +678,7 @@ public class Scan extends Query {
   }
 
   /**
-   * Getting the familyMap n
+   * Getting the familyMap
    */
   public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
     return this.familyMap;
@@ -752,16 +753,12 @@ public class Scan extends Query {
     return this.caching;
   }
 
-  /**
-   * n
-   */
+  /** Returns TimeRange */
   public TimeRange getTimeRange() {
     return this.tr;
   }
 
-  /**
-   * n
-   */
+  /** Returns Filter */
   @Override
   public Filter getFilter() {
     return filter;
@@ -796,7 +793,7 @@ public class Scan extends Query {
    * Set whether this scan is a reversed one
    * <p>
    * This is false by default which means forward(normal) scan.
-   * @param reversed if true, scan will be backward order n
+   * @param reversed if true, scan will be backward order
    */
   public Scan setReversed(boolean reversed) {
     this.reversed = reversed;
@@ -815,7 +812,8 @@ public class Scan extends Query {
    * Setting whether the caller wants to see the partial results when server returns
    * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By
    * default this value is false and the complete results will be assembled client side before being
-   * delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow()
+   * delivered to the caller.
+   * @see Result#mayHaveMoreCellsInRow()
    * @see #setBatch(int)
    */
   public Scan setAllowPartialResults(final boolean allowPartialResults) {
@@ -839,7 +837,7 @@ public class Scan extends Query {
 
   /**
    * Compile the table and column family (i.e. schema) information into a String. Useful for parsing
-   * and aggregation by debugging, logging, and administration tools. n
+   * and aggregation by debugging, logging, and administration tools.
    */
   @Override
   public Map<String, Object> getFingerprint() {
@@ -861,7 +859,7 @@ public class Scan extends Query {
    * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
    * Map along with the fingerprinted information. Useful for debugging, logging, and administration
    * tools.
-   * @param maxCols a limit on the number of columns output prior to truncation n
+   * @param maxCols a limit on the number of columns output prior to truncation
    */
   @Override
   public Map<String, Object> toMap(int maxCols) {
@@ -942,11 +940,12 @@ public class Scan extends Query {
    * non-compaction read request https://issues.apache.org/jira/browse/HBASE-7266 On the other hand,
    * if setting it true, we would do openScanner,next,closeScanner in one RPC call. It means the
    * better performance for small scan. [HBASE-9488]. Generally, if the scan range is within one
-   * data block(64KB), it could be considered as a small scan. n * @deprecated since 2.0.0 and will
-   * be removed in 3.0.0. Use {@link #setLimit(int)} and {@link #setReadType(ReadType)} instead. And
-   * for the one rpc optimization, now we will also fetch data when openScanner, and if the number
-   * of rows reaches the limit then we will close the scanner automatically which means we will fall
-   * back to one rpc.
+   * data block(64KB), it could be considered as a small scan.
+   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #setLimit(int)} and
+   *             {@link #setReadType(ReadType)} instead. And for the one rpc optimization, now we
+   *             will also fetch data when openScanner, and if the number of rows reaches the limit
+   *             then we will close the scanner automatically which means we will fall back to one
+   *             rpc.
    * @see #setLimit(int)
    * @see #setReadType(ReadType)
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
@@ -1065,7 +1064,7 @@ public class Scan extends Query {
    * reaches this value.
    * <p>
    * This condition will be tested at last, after all other conditions such as stopRow, filter, etc.
-   * @param limit the limit of rows for this scan n
+   * @param limit the limit of rows for this scan
    */
   public Scan setLimit(int limit) {
     this.limit = limit;
@@ -1074,7 +1073,7 @@ public class Scan extends Query {
 
   /**
    * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
-   * set {@code readType} to {@link ReadType#PREAD}. n
+   * set {@code readType} to {@link ReadType#PREAD}.
    */
   public Scan setOneRowLimit() {
     return setLimit(1).setReadType(ReadType.PREAD);
@@ -1096,7 +1095,7 @@ public class Scan extends Query {
    * Set the read type for this scan.
    * <p>
    * Notice that we may choose to use pread even if you specific {@link ReadType#STREAM} here. For
-   * example, we will always use pread if this is a get scan. n
+   * example, we will always use pread if this is a get scan.
    */
   public Scan setReadType(ReadType readType) {
     this.readType = readType;
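
Pulling together the Scan javadocs fixed above, a sketch of a bounded, limited scan (row keys
and numbers are illustrative):

    Scan scan = new Scan()
      .withStartRow(Bytes.toBytes("row-100"), true)   // inclusive start row
      .withStopRow(Bytes.toBytes("row-200"), false)   // exclusive stop row
      .readVersions(3)                                // up to 3 versions per column
      .setLimit(50)                                   // stop after 50 rows
      .setReadType(Scan.ReadType.PREAD);              // pread instead of streaming
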
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
index aeca91e5bc9..825a58e7bdd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
@@ -99,8 +99,8 @@ public class SecureBulkLoadClient {
   }
 
   /**
-   * Securely bulk load a list of HFiles using client protocol. nnnnnn * @return true if all are
-   * loaded n
+   * Securely bulk load a list of HFiles using client protocol.
+   * @return true if all are loaded
    */
   public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client,
     final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -110,8 +110,8 @@ public class SecureBulkLoadClient {
   }
 
   /**
-   * Securely bulk load a list of HFiles using client protocol. nnnnnnn * @return true if all are
-   * loaded n
+   * Securely bulk load a list of HFiles using client protocol.
+   * @return true if all are loaded
    */
   public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client,
     final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 894b4678908..11482a96a0f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -147,7 +147,8 @@ public interface Table extends Closeable {
    * @param results Empty Object[], same size as actions. Provides access to partial results, in
    *                case an exception is thrown. A null in the result array means that the call for
    *                that action failed, even after retries. The order of the objects in the results
-   *                array corresponds to the order of actions in the request list. n * @since 0.90.0
+   *                array corresponds to the order of actions in the request list.
+   * @since 0.90.0
    */
   default void batch(final List<? extends Row> actions, final Object[] results)
     throws IOException, InterruptedException {
@@ -358,8 +359,8 @@ public interface Table extends Closeable {
    * @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
    *          {@link #put(List)} runs pre-flight validations on the input list on client. Currently
    *          {@link #delete(List)} doesn't run validations on the client, there is no need
-   *          currently, but this may change in the future. An * {@link IllegalArgumentException}
-   *          will be thrown in this case.
+   *          currently, but this may change in the future. If validation fails, an
+   *          {@link IllegalArgumentException} will be thrown.
    */
   default void delete(List<Delete> deletes) throws IOException {
     throw new NotImplementedException("Add an implementation!");
@@ -770,12 +771,12 @@ public interface Table extends Closeable {
    * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
    * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
    * the invocations to the same region server will be batched into one call. The coprocessor
-   * service is invoked according to the service instance, method name and parameters. n * the
-   * descriptor for the protobuf service method to call. n * the method call parameters n * start
-   * region selection with region containing this row. If {@code null}, the selection will start
-   * with the first table region. n * select regions up to and including the region containing this
-   * row. If {@code null}, selection will continue through the last table region. n * the proto type
-   * of the response of the method in Service.
+   * service is invoked according to the service instance, method name and parameters: the
+   * descriptor for the protobuf service method to call, the method call parameters, the row to
+   * start region selection with (if {@code null}, the selection will start with the first table
+   * region), and the row to end region selection with, up to and including its region (if
+   * {@code null}, selection will continue through the last table region), plus the proto type of
+   * the response of the method in Service.
    * @param <R> the response type for the coprocessor Service method
    * @return a map of result values keyed by region name
    */
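
For the batch contract documented above (the results array is parallel to the actions list), a
short sketch assuming an open Table named "table" and illustrative row/column names:

    List<Row> actions = new ArrayList<>();
    actions.add(new Put(Bytes.toBytes("r1"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    actions.add(new Get(Bytes.toBytes("r2")));
    Object[] results = new Object[actions.size()];
    // A null slot means that action failed even after retries.
    table.batch(actions, results);
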
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
index ff97f419bac..088c52d8f51 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -166,7 +166,7 @@ public interface TableDescriptor {
   String getRegionSplitPolicyClassName();
 
   /**
-   * Get the name of the table n
+   * Get the name of the table
    */
   TableName getTableName();
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index fa1b1b65224..473631b1b93 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -895,7 +895,7 @@ public class TableDescriptorBuilder {
     }
 
     /**
-     * Get the name of the table n
+     * Get the name of the table
      */
     @Override
     public TableName getTableName() {
@@ -1297,7 +1297,8 @@ public class TableDescriptorBuilder {
      * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
      * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
      * region is opened.
-     * @param className Full class name. n * @return the modifyable TD
+     * @param className Full class name.
+     * @return the modifyable TD
      */
     public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
       return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
@@ -1345,8 +1346,8 @@ public class TableDescriptorBuilder {
      * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
      * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
      * region is opened.
-     * @param specStr The Coprocessor specification all in in one String n * @return the modifyable
-     *                TD
+     * @param specStr The Coprocessor specification all in one String
+     * @return the modifyable TD
      * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed
      *             in HBase 3.0.0.
      */
@@ -1488,8 +1489,8 @@ public class TableDescriptorBuilder {
 
     /**
      * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
-     * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code> n
-     *         * @see #toByteArray()
+     * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
+     * @see #toByteArray()
      */
     private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
       if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
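
The setCoprocessor javadoc above is normally reached through TableDescriptorBuilder; a sketch in
which the table name, family and the coprocessor class "org.example.DemoEndpoint" are
hypothetical placeholders:

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .setCoprocessor("org.example.DemoEndpoint")  // loadability is only checked at region open
      .build();
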
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
index 6ecd97a75c9..6bf4ffd51c5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -42,7 +42,7 @@ public class TableState {
 
     /**
     * Convert from PB version of State
-     * @param state convert from n
+     * @param state convert from
      */
     public static State convert(HBaseProtos.TableState.State state) {
       State ret;
@@ -66,7 +66,7 @@ public class TableState {
     }
 
     /**
-     * Covert to PB version of State n
+     * Convert to PB version of State
      */
     public HBaseProtos.TableState.State convert() {
       HBaseProtos.TableState.State state;
@@ -140,7 +140,7 @@ public class TableState {
   }
 
   /**
-   * Table name for state n
+   * Table name for state
    */
   public TableName getTableName() {
     return tableName;
@@ -168,7 +168,7 @@ public class TableState {
   }
 
   /**
-   * Covert to PB version of TableState n
+   * Convert to PB version of TableState
    */
   public HBaseProtos.TableState convert() {
     return HBaseProtos.TableState.newBuilder().setState(this.state.convert()).build();
@@ -177,7 +177,7 @@ public class TableState {
   /**
   * Convert from PB version of TableState
   * @param tableName  the table this state is of
-   * @param tableState convert from n
+   * @param tableState convert from
    */
   public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) {
     TableState.State state = State.convert(tableState.getState());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
index a2a53114ac7..cab7eff1516 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @Deprecated
 class UnmodifyableHRegionInfo extends HRegionInfo {
   /*
-   * Creates an unmodifyable copy of an HRegionInfo n
+   * Creates an unmodifyable copy of an HRegionInfo
    */
   UnmodifyableHRegionInfo(HRegionInfo info) {
     super(info);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index ab5915ec975..76a0d6addf3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -33,7 +33,7 @@ public class ServerStatistics {
 
   /**
    * Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, as
-   * something gets set nn
+   * something gets set
    */
   public void update(byte[] region, RegionLoadStats currentStats) {
     RegionStatistics regionStat = this.stats.get(region);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
index 519109934eb..8d75af05cfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
@@ -35,8 +35,8 @@ public class ServerSideScanMetrics {
   private final Map<String, AtomicLong> counters = new HashMap<>();
 
   /**
-   * Create a new counter with the specified name n * @return {@link AtomicLong} instance for the
-   * counter with counterName
+   * Create a new counter with the specified name
+   * @return {@link AtomicLong} instance for the counter with counterName
    */
   protected AtomicLong createCounter(String counterName) {
     AtomicLong c = new AtomicLong(0);
@@ -75,9 +75,6 @@ public class ServerSideScanMetrics {
    */
   public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME);
 
-  /**
-   * nn
-   */
   public void setCounter(String counterName, long value) {
     AtomicLong c = this.counters.get(counterName);
     if (c != null) {
@@ -85,23 +82,16 @@ public class ServerSideScanMetrics {
     }
   }
 
-  /**
-   * n * @return true if a counter exists with the counterName
-   */
+  /** Returns true if a counter exists with the counterName */
   public boolean hasCounter(String counterName) {
     return this.counters.containsKey(counterName);
   }
 
-  /**
-   * n * @return {@link AtomicLong} instance for this counter name, null if counter does not exist.
-   */
+  /** Returns {@link AtomicLong} instance for this counter name, null if counter does not exist. */
   public AtomicLong getCounter(String counterName) {
     return this.counters.get(counterName);
   }
 
-  /**
-   * nn
-   */
   public void addToCounter(String counterName, long delta) {
     AtomicLong c = this.counters.get(counterName);
     if (c != null) {
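
The counter accessors above back the client-side scan metrics; a sketch of reading them,
assuming an open Table named "table" and a 2.x client where ResultScanner exposes
getScanMetrics():

    Scan scan = new Scan();
    scan.setScanMetricsEnabled(true);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // consume results
      }
      ScanMetrics metrics = scanner.getScanMetrics();
      long rowsScanned = metrics.countOfRowsScanned.get();
    }
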
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 4f94dc67a88..d2816552d34 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -178,7 +178,8 @@ public class ReplicationAdmin implements Closeable {
 
   /**
    * Get the number of slave clusters the local cluster has.
-   * @return number of slave clusters n * @deprecated
+   * @return number of slave clusters
+   * @deprecated
    */
   @Deprecated
   public int getPeersCount() throws IOException {
@@ -222,8 +223,9 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Append the replicable table-cf config of the specified peer
    * @param id       a short that identifies the cluster
-   * @param tableCfs table-cfs config str nn * @deprecated as release of 2.0.0, and it will be
-   *                 removed in 3.0.0, use {@link #appendPeerTableCFs(String, Map)} instead.
+   * @param tableCfs table-cfs config str
+   * @deprecated as release of 2.0.0, and it will be removed in 3.0.0, use
+   *             {@link #appendPeerTableCFs(String, Map)} instead.
    */
   @Deprecated
   public void appendPeerTableCFs(String id, String tableCfs)
@@ -234,7 +236,7 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Append the replicable table-cf config of the specified peer
    * @param id       a short that identifies the cluster
-   * @param tableCfs A map from tableName to column family names nn
+   * @param tableCfs A map from tableName to column family names
    */
   @Deprecated
   public void appendPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
@@ -245,8 +247,9 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Remove some table-cfs from table-cfs config of the specified peer
    * @param id      a short name that identifies the cluster
-   * @param tableCf table-cfs config str nn * @deprecated as release of 2.0.0, and it will be
-   *                removed in 3.0.0, use {@link #removePeerTableCFs(String, Map)} instead.
+   * @param tableCf table-cfs config str
+   * @deprecated as release of 2.0.0, and it will be removed in 3.0.0, use
+   *             {@link #removePeerTableCFs(String, Map)} instead.
    */
   @Deprecated
   public void removePeerTableCFs(String id, String tableCf)
@@ -257,7 +260,7 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Remove some table-cfs from config of the specified peer
    * @param id       a short name that identifies the cluster
-   * @param tableCfs A map from tableName to column family names nn
+   * @param tableCfs A map from tableName to column family names
    */
   @Deprecated
   public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
index 5ed2aba2a19..232540a35aa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
@@ -50,36 +50,27 @@ import org.apache.yetus.audience.InterfaceStability;
 public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Message,
   R extends Message> {
 
-  /**
-   * nnn * @return value of type T n
-   */
+  /** Returns value of type T */
   public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException;
 
-  /**
-   * nn * @return sum or non null value among (if either of them is null); otherwise returns a null.
-   */
+  /** Returns the sum, or the non-null value if either argument is null; null if both are null. */
   public abstract S add(S l1, S l2);
 
   /**
-   * returns the maximum value for this type T n
+   * returns the maximum value for this type T
    */
-
   public abstract T getMaxValue();
 
   public abstract T getMinValue();
 
-  /**
-   * nnn
-   */
+  /** Returns the product of o1 and o2 */
   public abstract S multiply(S o1, S o2);
 
-  /**
-   * nn
-   */
+  /** Returns the incremented value of o */
   public abstract S increment(S o);
 
   /**
-   * provides casting opportunity between the data types. nn
+   * provides casting opportunity between the data types.
    */
   public abstract S castToReturnType(T o);
 
@@ -94,7 +85,7 @@ public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Messa
 
   /**
    * used for computing average of &lt;S&gt; data values. Not providing the divide method that takes
-   * two &lt;S&gt; values as it is not needed as of now. nnn
+   * two &lt;S&gt; values as it is not needed as of now.
    */
   public abstract double divideForAvg(S o, Long l);
 
@@ -110,37 +101,37 @@ public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Messa
 
   /**
    * This method should initialize any field(s) of the ColumnInterpreter with a parsing of the
-   * passed message bytes (used on the server side). n
+   * passed message bytes (used on the server side).
    */
   public abstract void initialize(P msg);
 
   /**
-   * This method gets the PB message corresponding to the cell type n * @return the PB message for
-   * the cell-type instance
+   * This method gets the PB message corresponding to the cell type
+   * @return the PB message for the cell-type instance
    */
   public abstract Q getProtoForCellType(T t);
 
   /**
-   * This method gets the PB message corresponding to the cell type n * @return the cell-type
-   * instance from the PB message
+   * This method gets the cell-type instance from the PB message
+   * @return the cell-type instance from the PB message
    */
   public abstract T getCellValueFromProto(Q q);
 
   /**
-   * This method gets the PB message corresponding to the promoted type n * @return the PB message
-   * for the promoted-type instance
+   * This method gets the PB message corresponding to the promoted type
+   * @return the PB message for the promoted-type instance
    */
   public abstract R getProtoForPromotedType(S s);
 
   /**
-   * This method gets the promoted type from the proto message n * @return the promoted-type
-   * instance from the PB message
+   * This method gets the promoted type from the proto message
+   * @return the promoted-type instance from the PB message
    */
   public abstract S getPromotedValueFromProto(R r);
 
   /**
    * The response message comes as type S. This will convert/cast it to T. In some sense, performs
-   * the opposite of {@link #castToReturnType(Object)} nn
+   * the opposite of {@link #castToReturnType(Object)}
    */
   public abstract T castToCellType(S response);
 }
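
The add() contract above is the subtle one: the sum when both operands are present, the surviving operand when one side is null. A toy sketch with the promoted type S fixed to Long, illustrative only and not part of the patch:

  class AddContract {
    // Mirrors the documented add() contract for a promoted type S = Long.
    static Long add(Long l1, Long l2) {
      if (l1 == null) {
        return l2; // also covers the case where both operands are null
      }
      if (l2 == null) {
        return l1;
      }
      return l1 + l2;
    }
  }
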
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
index ff9ed066fd4..de8e90ca9ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
@@ -33,7 +33,7 @@ public class CoprocessorException extends DoNotRetryIOException {
   }
 
   /**
-   * Constructor with a Class object and exception message. nn
+   * Constructor with a Class object and exception message.
    */
   public CoprocessorException(Class<?> clazz, String s) {
     super("Coprocessor [" + clazz.getName() + "]: " + s);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
index 33b5d9cb57f..22b3ae4a3bf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
@@ -170,7 +170,7 @@ public final class ClientExceptionsUtil {
   /**
    * Translates exception for preemptive fast fail checks.
    * @param t exception to check
-   * @return translated exception n
+   * @return translated exception
    */
   public static Throwable translatePFFE(Throwable t) throws IOException {
     if (t instanceof NoSuchMethodError) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
index ae15777a7f0..00774e37094 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
@@ -35,15 +35,13 @@ public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRet
   }
 
   /**
-   * n
-   */
+   */
   public FailedSanityCheckException(String message) {
     super(message);
   }
 
   /**
-   * nn
-   */
+   */
   public FailedSanityCheckException(String message, Throwable cause) {
     super(message, cause);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java
index 7aff979f4c2..ff06e0648fd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java
@@ -65,8 +65,8 @@ public class BinaryComparator extends org.apache.hadoop.hbase.filter.ByteArrayCo
 
   /**
    * @param pbBytes A pb serialized {@link BinaryComparator} instance
-   * @return An instance of {@link BinaryComparator} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link BinaryComparator} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static BinaryComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
     ComparatorProtos.BinaryComparator proto;
@@ -79,8 +79,8 @@ public class BinaryComparator extends org.apache.hadoop.hbase.filter.ByteArrayCo
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the comparator that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
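
The @see #toByteArray cross-references restored above describe a round trip; a minimal sketch (the row key is made up):

  import org.apache.hadoop.hbase.exceptions.DeserializationException;
  import org.apache.hadoop.hbase.filter.BinaryComparator;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BinaryComparatorRoundTrip {
    public static void main(String[] args) throws DeserializationException {
      BinaryComparator original = new BinaryComparator(Bytes.toBytes("row-0001"));
      byte[] pb = original.toByteArray();                     // pb-serialized form
      BinaryComparator copy = BinaryComparator.parseFrom(pb); // rebuilt from bytes
      System.out.println(Bytes.toString(copy.getValue()));    // prints row-0001
    }
  }

The same toByteArray()/parseFrom() pairing applies to the other comparators and filters touched below.
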
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java
index fcf447dcb0c..237833934d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java
@@ -69,8 +69,8 @@ public class BinaryPrefixComparator extends ByteArrayComparable {
 
   /**
    * @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance
-   * @return An instance of {@link BinaryPrefixComparator} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link BinaryPrefixComparator} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static BinaryPrefixComparator parseFrom(final byte[] pbBytes)
     throws DeserializationException {
@@ -84,8 +84,8 @@ public class BinaryPrefixComparator extends ByteArrayComparable {
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the comparator that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java
index 8abdcf4e8a6..e462f81d945 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java
@@ -75,7 +75,8 @@ public class BitComparator extends ByteArrayComparable {
 
   /**
    * @param pbBytes A pb serialized {@link BitComparator} instance
-   * @return An instance of {@link BitComparator} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link BitComparator} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static BitComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
     ComparatorProtos.BitComparator proto;
@@ -89,8 +90,8 @@ public class BitComparator extends ByteArrayComparable {
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the comparator that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java
index e9cf7dce76d..7033428283a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java
@@ -79,22 +79,19 @@ public class ColumnPaginationFilter extends FilterBase {
   }
 
   /**
-   * n
-   */
+   */
   public int getLimit() {
     return limit;
   }
 
   /**
-   * n
-   */
+   */
   public int getOffset() {
     return offset;
   }
 
   /**
-   * n
-   */
+   */
   public byte[] getColumnOffset() {
     return columnOffset;
   }
@@ -174,8 +171,8 @@ public class ColumnPaginationFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance
-   * @return An instance of {@link ColumnPaginationFilter} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link ColumnPaginationFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static ColumnPaginationFilter parseFrom(final byte[] pbBytes)
     throws DeserializationException {
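
For the limit/offset accessors above, a small usage sketch (the values are arbitrary):

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;

  class PaginationExample {
    static Scan paginatedScan() {
      // At most 5 columns per row, skipping each row's first 10 columns.
      ColumnPaginationFilter filter = new ColumnPaginationFilter(5, 10);
      return new Scan().setFilter(filter);
    }
  }
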
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
index 1abd49fff3c..fb5311ed7b8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
@@ -164,8 +164,8 @@ public class ColumnRangeFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link ColumnRangeFilter} instance
-   * @return An instance of {@link ColumnRangeFilter} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link ColumnRangeFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static ColumnRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.ColumnRangeFilter proto;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
index 0b3b85dbb47..3749e845457 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
@@ -68,9 +68,7 @@ public class ColumnValueFilter extends FilterBase {
     this.comparator = Preconditions.checkNotNull(comparator, "Comparator should not be null");
   }
 
-  /**
-   * n
-   */
+  /** Returns operator */
   public CompareOperator getCompareOperator() {
     return op;
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
index 5b25e3b2ae1..443ee77a6bf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
@@ -101,8 +101,7 @@ public abstract class CompareFilter extends FilterBase {
   }
 
   /**
-   * n * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()}
-   * instead.
+   * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead.
    */
   @Deprecated
   public CompareOp getOperator() {
@@ -287,8 +286,8 @@ public abstract class CompareFilter extends FilterBase {
   }
 
   /**
-   * n * @return true if and only if the fields of the filter that are serialized are equal to the
-   * corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the filter that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(Filter o) {
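
The deprecation note above is a one-line migration for callers; sketched here with RowFilter, one of the CompareFilter subclasses touched later in this patch:

  import org.apache.hadoop.hbase.CompareOperator;
  import org.apache.hadoop.hbase.filter.BinaryComparator;
  import org.apache.hadoop.hbase.filter.RowFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  class OperatorMigration {
    static void describe(RowFilter filter) {
      // Before (deprecated since 2.0.0): CompareFilter.CompareOp op = filter.getOperator();
      CompareOperator op = filter.getCompareOperator(); // replacement accessor
      System.out.println("operator = " + op);
    }

    static RowFilter sample() {
      return new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("r1")));
    }
  }
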
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
index 0a39f4c94f2..d4d12095443 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
@@ -231,8 +231,8 @@ public class DependentColumnFilter extends CompareFilter {
 
   /**
    * @param pbBytes A pb serialized {@link DependentColumnFilter} instance
-   * @return An instance of {@link DependentColumnFilter} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link DependentColumnFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static DependentColumnFilter parseFrom(final byte[] pbBytes)
     throws DeserializationException {
@@ -259,8 +259,8 @@ public class DependentColumnFilter extends CompareFilter {
   }
 
   /**
-   * n * @return true if and only if the fields of the filter that are serialized are equal to the
-   * corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the filter that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(
       value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
index 917ac522de1..961b3868257 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
@@ -102,7 +102,8 @@ public class FamilyFilter extends CompareFilter {
 
   /**
    * @param pbBytes A pb serialized {@link FamilyFilter} instance
-   * @return An instance of {@link FamilyFilter} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link FamilyFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static FamilyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.FamilyFilter proto;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
index 9df8a6f14f3..4b78e69efae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
@@ -241,7 +241,8 @@ public abstract class Filter {
    * Concrete implementers can signal a failure condition in their code by throwing an
    * {@link IOException}.
    * @param pbBytes A pb serialized {@link Filter} instance
-   * @return An instance of {@link Filter} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link Filter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static Filter parseFrom(final byte[] pbBytes) throws DeserializationException {
     throw new DeserializationException(
@@ -250,9 +251,9 @@ public abstract class Filter {
 
   /**
    * Concrete implementers can signal a failure condition in their code by throwing an
-   * {@link IOException}. n * @return true if and only if the fields of the filter that are
-   * serialized are equal to the corresponding fields in other. Used for testing.
-   * @throws IOException in case an I/O or an filter specific failure needs to be signaled.
+   * {@link IOException}.
+   * @return true if and only if the fields of the filter that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   abstract boolean areSerializedFieldsEqual(Filter other);
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
index e66022f6e7d..988725edad5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
@@ -148,9 +148,9 @@ public abstract class FilterBase extends Filter {
   }
 
   /**
-   * Default implementation so that writers of custom filters aren't forced to implement. n
-   * * @return true if and only if the fields of the filter that are serialized are equal to the
-   * corresponding fields in other. Used for testing.
+   * Default implementation so that writers of custom filters aren't forced to implement.
+   * @return true if and only if the fields of the filter that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(Filter other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
index 35313837843..1747ceb95a2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
@@ -84,7 +84,7 @@ final public class FilterList extends FilterBase {
 
   /**
    * Constructor that takes a var arg number of {@link Filter}s. The default operator MUST_PASS_ALL
-   * is assumed. n
+   * is assumed.
    */
   public FilterList(final Filter... filters) {
     this(Operator.MUST_PASS_ALL, Arrays.asList(filters));
@@ -108,14 +108,14 @@ final public class FilterList extends FilterBase {
   }
 
   /**
-   * Get the operator. n
+   * Get the operator.
    */
   public Operator getOperator() {
     return operator;
   }
 
   /**
-   * Get the filters. n
+   * Get the filters.
    */
   public List<Filter> getFilters() {
     return filterListBase.getFilters();
@@ -206,7 +206,8 @@ final public class FilterList extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link FilterList} instance
-   * @return An instance of {@link FilterList} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link FilterList} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.FilterList proto;
@@ -229,8 +230,8 @@ final public class FilterList extends FilterBase {
   }
 
   /**
-   * n * @return true if and only if the fields of the filter that are serialized are equal to the
-   * corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the filter that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(Filter other) {
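
A short sketch of the var-arg constructor and the two accessors documented above (member filters chosen arbitrarily):

  import org.apache.hadoop.hbase.filter.FilterList;
  import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
  import org.apache.hadoop.hbase.filter.PageFilter;

  class FilterListExample {
    static FilterList build() {
      // Var-arg form; the default operator MUST_PASS_ALL is assumed.
      FilterList list = new FilterList(new PageFilter(10), new KeyOnlyFilter());
      assert list.getOperator() == FilterList.Operator.MUST_PASS_ALL;
      assert list.getFilters().size() == 2;
      return list;
    }
  }
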
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
index 2d36172064d..1bff5681746 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
@@ -92,7 +92,7 @@ public abstract class FilterListBase extends FilterBase {
    * the current child, we should set the traverse result (transformed cell) of previous node(s) as
    * the initial value. (HBASE-18879).
    * @param c The cell in question.
-   * @return the transformed cell. n
+   * @return the transformed cell.
    */
   @Override
   public Cell transformCell(Cell c) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
index b49e51f348c..5511c3e3bb8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
@@ -97,7 +97,8 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter {
   /**
    * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance
    * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from
-   *         <code>bytes</code> n * @see #toByteArray
+   *         <code>bytes</code>
+   * @see #toByteArray
    */
   public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes)
     throws DeserializationException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 6036aebdd57..c7c48419c00 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -138,7 +138,8 @@ public class FuzzyRowFilter extends FilterBase {
 
   /**
    * We need to preprocess mask array, as since we treat 2's as unfixed positions and -1 (0xff) as
-   * fixed positions n * @return mask array
+   * fixed positions
+   * @return mask array
    */
   private byte[] preprocessMask(byte[] mask) {
     if (!UNSAFE_UNALIGNED) {
@@ -300,8 +301,8 @@ public class FuzzyRowFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link FuzzyRowFilter} instance
-   * @return An instance of {@link FuzzyRowFilter} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link FuzzyRowFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static FuzzyRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.FuzzyRowFilter proto;
@@ -628,8 +629,8 @@ public class FuzzyRowFilter extends FilterBase {
 
   /**
    * For forward scanner, next cell hint should not contain any trailing zeroes unless they are part
-   * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01' nn * @param
-   * toInc - position of incremented byte
+   * of fuzzyKeyMeta; otherwise a hint of '\x01\x01\x01\x00\x00' will skip the valid row '\x01\x01\x01'
+   * @param toInc - position of incremented byte
    * @return trimmed version of result
    */
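
The -1/2 values above are the preprocessed form; in the public constructor a mask byte of 0 marks a fixed position and 1 a free one. A sketch with a made-up 4-byte key layout:

  import java.util.Arrays;
  import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.hbase.util.Pair;

  class FuzzyExample {
    static FuzzyRowFilter build() {
      byte[] key  = Bytes.toBytesBinary("\\x00\\x00ab"); // 2-byte prefix + "ab"
      byte[] mask = new byte[] { 1, 1, 0, 0 };           // prefix may vary, "ab" must match
      return new FuzzyRowFilter(Arrays.asList(new Pair<>(key, mask)));
    }
  }
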
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
index 67a214e5650..f28bfee8f46 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
@@ -93,8 +93,8 @@ public class InclusiveStopFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link InclusiveStopFilter} instance
-   * @return An instance of {@link InclusiveStopFilter} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link InclusiveStopFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static InclusiveStopFilter parseFrom(final byte[] pbBytes)
     throws DeserializationException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
index 60b3a5c0480..bd4271e457c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
@@ -109,7 +109,8 @@ public class KeyOnlyFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link KeyOnlyFilter} instance
-   * @return An instance of {@link KeyOnlyFilter} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link KeyOnlyFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static KeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.KeyOnlyFilter proto;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java
index 8f9a7f1dfc3..c7eb67612a7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java
@@ -78,8 +78,8 @@ public class LongComparator extends ByteArrayComparable {
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the comparator that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   boolean areSerializedFieldsEqual(LongComparator other) {
     if (other == this) return true;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
index e7c868e3bbb..e3be6e6c2e7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
@@ -135,8 +135,8 @@ public class MultipleColumnPrefixFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance
-   * @return An instance of {@link MultipleColumnPrefixFilter} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link MultipleColumnPrefixFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static MultipleColumnPrefixFilter parseFrom(final byte[] pbBytes)
     throws DeserializationException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
index 6153e1e8080..64fbb4fc928 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
@@ -72,8 +72,8 @@ public class NullComparator extends ByteArrayComparable {
 
   /**
    * @param pbBytes A pb serialized {@link NullComparator} instance
-   * @return An instance of {@link NullComparator} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link NullComparator} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static NullComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
     try {
@@ -86,8 +86,8 @@ public class NullComparator extends ByteArrayComparable {
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the comparator that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
index 45c8a1f1a3f..aec2c84ba37 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
@@ -107,7 +107,8 @@ public class PageFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link PageFilter} instance
-   * @return An instance of {@link PageFilter} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link PageFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static PageFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.PageFilter proto;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
index 656ceaf3e1a..ece069d0c89 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
@@ -38,7 +38,7 @@ public class RandomRowFilter extends FilterBase {
   protected boolean filterOutRow;
 
   /**
-   * Create a new filter with a specified chance for a row to be included. n
+   * Create a new filter with a specified chance for a row to be included.
    */
   public RandomRowFilter(float chance) {
     this.chance = chance;
@@ -50,7 +50,7 @@ public class RandomRowFilter extends FilterBase {
   }
 
   /**
-   * Set the chance that a row is included. n
+   * Set the chance that a row is included.
    */
   public void setChance(float chance) {
     this.chance = chance;
@@ -115,8 +115,8 @@ public class RandomRowFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link RandomRowFilter} instance
-   * @return An instance of {@link RandomRowFilter} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link RandomRowFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static RandomRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.RandomRowFilter proto;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
index 4bdc057bd9e..2db7f7e7561 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
@@ -153,8 +153,8 @@ public class RegexStringComparator extends ByteArrayComparable {
 
   /**
    * @param pbBytes A pb serialized {@link RegexStringComparator} instance
-   * @return An instance of {@link RegexStringComparator} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link RegexStringComparator} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static RegexStringComparator parseFrom(final byte[] pbBytes)
     throws DeserializationException {
@@ -183,8 +183,8 @@ public class RegexStringComparator extends ByteArrayComparable {
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the comparator that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
index fdefe382439..2978b8e3236 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
@@ -116,7 +116,8 @@ public class RowFilter extends CompareFilter {
 
   /**
    * @param pbBytes A pb serialized {@link RowFilter} instance
-   * @return An instance of {@link RowFilter} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link RowFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static RowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.RowFilter proto;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
index bbd8ec67994..62f6a046619 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
@@ -109,9 +109,9 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
   }
 
   /**
-   * Constructor for protobuf deserialization only. nnnnnn * @deprecated Since 2.0.0. Will be
-   * removed in 3.0.0. Use
-   * {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
+   * Constructor for protobuf deserialization only.
+   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+   *             {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
    */
   @Deprecated
   protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
@@ -122,7 +122,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
   }
 
   /**
-   * Constructor for protobuf deserialization only. nnnnnn
+   * Constructor for protobuf deserialization only.
    */
   protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
     final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing,
@@ -174,8 +174,8 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
 
   /**
    * @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance
-   * @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code> n
-   *         * @see #toByteArray
+   * @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes)
     throws DeserializationException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index a68fdcd0577..8d7d8f5bce4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -155,10 +155,10 @@ public class SingleColumnValueFilter extends FilterBase {
   }
 
   /**
-   * Constructor for protobuf deserialization only. nnnnnn * @deprecated Since 2.0.0. Will be
-   * removed in 3.0.0. Use
-   * {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
-   * instead.
+   * Constructor for protobuf deserialization only.
+   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+   *             {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
+   *             instead.
    */
   @Deprecated
   protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
@@ -169,7 +169,7 @@ public class SingleColumnValueFilter extends FilterBase {
   }
 
   /**
-   * Constructor for protobuf deserialization only. nnnnnn
+   * Constructor for protobuf deserialization only.
    */
   protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
     final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator,
@@ -180,8 +180,7 @@ public class SingleColumnValueFilter extends FilterBase {
   }
 
   /**
-   * n * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()}
-   * instead.
+   * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead.
    */
   @Deprecated
   public CompareOp getOperator() {
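
Both notes above steer callers toward the CompareOperator flavor; a minimal construction sketch (family, qualifier and value are placeholders):

  import org.apache.hadoop.hbase.CompareOperator;
  import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  class ScvfExample {
    static SingleColumnValueFilter build() {
      SingleColumnValueFilter f = new SingleColumnValueFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("q"), CompareOperator.EQUAL, Bytes.toBytes("v"));
      f.setFilterIfMissing(true); // drop rows that lack the column entirely
      return f;
    }
  }
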
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
index 4b23a87537d..274ecaf6228 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
@@ -112,7 +112,8 @@ public class SkipFilter extends FilterBase {
 
   /**
    * @param pbBytes A pb serialized {@link SkipFilter} instance
-   * @return An instance of {@link SkipFilter} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link SkipFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static SkipFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.SkipFilter proto;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
index b5fe22dca24..83ca05a171f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
@@ -77,8 +77,8 @@ public class SubstringComparator extends ByteArrayComparable {
 
   /**
    * @param pbBytes A pb serialized {@link SubstringComparator} instance
-   * @return An instance of {@link SubstringComparator} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link SubstringComparator} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static SubstringComparator parseFrom(final byte[] pbBytes)
     throws DeserializationException {
@@ -92,8 +92,8 @@ public class SubstringComparator extends ByteArrayComparable {
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the comparator that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
index eaeaaac3df1..910badbbb70 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
@@ -54,7 +54,7 @@ public class TimestampsFilter extends FilterBase {
   long minTimestamp = Long.MAX_VALUE;
 
   /**
-   * Constructor for filter that retains only the specified timestamps in the list. n
+   * Constructor for filter that retains only the specified timestamps in the list.
    */
   public TimestampsFilter(List<Long> timestamps) {
     this(timestamps, false);
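
A quick sketch of the list constructor documented above (the timestamps are arbitrary epoch millis):

  import java.util.Arrays;
  import org.apache.hadoop.hbase.filter.TimestampsFilter;

  class TsExample {
    static TimestampsFilter build() {
      // Keep only cells stamped with exactly one of the listed versions.
      return new TimestampsFilter(Arrays.asList(1696600000000L, 1696603600000L));
    }
  }
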
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java
index e726d88f720..461f80302cd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java
@@ -97,7 +97,8 @@ public class ValueFilter extends CompareFilter {
 
   /**
    * @param pbBytes A pb serialized {@link ValueFilter} instance
-   * @return An instance of {@link ValueFilter} made from <code>bytes</code> n * @see #toByteArray
+   * @return An instance of {@link ValueFilter} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static ValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.ValueFilter proto;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
index 9e9c0688ece..a6cac1043f2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
@@ -104,9 +104,10 @@ class CellBlockBuilder {
 
   /**
    * Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
-   * <code>compressor</code>. nnn * @return Null or byte buffer filled with a cellblock filled with
-   * passed-in Cells encoded using passed in <code>codec</code> and/or <code>compressor</code>; the
-   * returned buffer has been flipped and is ready for reading. Use limit to find total size. n
+   * <code>compressor</code>.
+   * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
+   *         passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has
+   *         been flipped and is ready for reading. Use limit to find total size.
    */
   public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
     final CellScanner cellScanner) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 28afef07e55..2095fc82cb9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -255,7 +255,7 @@ public final class ProtobufUtil {
    * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf
-   * references. n
+   * references.
    */
   public static IOException handleRemoteException(Exception e) {
     return makeIOExceptionOfException(e);
@@ -359,7 +359,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Get to a client Get
    * @param proto the protocol buffer Get to convert
-   * @return the converted client Get n
+   * @return the converted client Get
    */
   public static Get toGet(final ClientProtos.Get proto) throws IOException {
     if (proto == null) return null;
@@ -444,7 +444,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to a Put.
    * @param proto The protocol buffer MutationProto to convert
-   * @return A client Put. n
+   * @return A client Put.
    */
   public static Put toPut(final MutationProto proto) throws IOException {
     return toPut(proto, null);
@@ -454,7 +454,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Mutate to a Put.
    * @param proto       The protocol buffer MutationProto to convert
    * @param cellScanner If non-null, the Cell data that goes with this proto.
-   * @return A client Put. n
+   * @return A client Put.
    */
   public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -538,7 +538,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to a Delete
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted client Delete n
+   * @return the converted client Delete
    */
   public static Delete toDelete(final MutationProto proto) throws IOException {
     return toDelete(proto, null);
@@ -548,7 +548,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Mutate to a Delete
    * @param proto       the protocol buffer Mutate to convert
    * @param cellScanner if non-null, the data that goes with this delete.
-   * @return the converted client Delete n
+   * @return the converted client Delete
    */
   public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -675,9 +675,9 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer Mutate to an Append n * @param proto the protocol buffer Mutate to
-   * convert
-   * @return the converted client Append n
+   * Convert a protocol buffer Mutate to an Append
+   * @param proto the protocol buffer Mutate to convert
+   * @return the converted client Append
    */
   public static Append toAppend(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -695,7 +695,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to an Increment
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted client Increment n
+   * @return the converted client Increment
    */
   public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -714,7 +714,7 @@ public final class ProtobufUtil {
   /**
    * Convert a MutateRequest to Mutation
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted Mutation n
+   * @return the converted Mutation
    */
   public static Mutation toMutation(final MutationProto proto) throws IOException {
     MutationType type = proto.getMutateType();
@@ -735,7 +735,8 @@ public final class ProtobufUtil {
 
   /**
    * Convert a protocol buffer Mutate to a Get.
-   * @param proto the protocol buffer Mutate to convert. n * @return the converted client get. n
+   * @param proto the protocol buffer Mutate to convert.
+   * @return the converted client get.
    */
   public static Get toGet(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -815,7 +816,7 @@ public final class ProtobufUtil {
   /**
    * Convert a client Scan to a protocol buffer Scan
    * @param scan the client Scan to convert
-   * @return the converted protocol buffer Scan n
+   * @return the converted protocol buffer Scan
    */
   public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
     ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
@@ -908,7 +909,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Scan to a client Scan
    * @param proto the protocol buffer Scan to convert
-   * @return the converted client Scan n
+   * @return the converted client Scan
    */
   public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
     byte[] startRow = HConstants.EMPTY_START_ROW;
@@ -1009,7 +1010,7 @@ public final class ProtobufUtil {
   /**
    * Create a protocol buffer Get based on a client Get.
    * @param get the client Get
-   * @return a protocol buffer Get n
+   * @return a protocol buffer Get
    */
   public static ClientProtos.Get toGet(final Get get) throws IOException {
     ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
@@ -1074,7 +1075,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
+   * Create a protocol buffer Mutate based on a client Mutation
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutation(final MutationType type, final Mutation mutation,
     final long nonce) throws IOException {
@@ -1123,8 +1125,8 @@ public final class ProtobufUtil {
 
   /**
    * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
-   * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a
-   * protobuf'd Mutation n
+   * Understanding is that the Cell will be transported other than via protobuf.
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation,
     final MutationProto.Builder builder) throws IOException {
@@ -1133,8 +1135,8 @@ public final class ProtobufUtil {
 
   /**
    * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
-   * Understanding is that the Cell will be transported other than via protobuf. nn * @return a
-   * protobuf'd Mutation n
+   * Understanding is that the Cell will be transported other than via protobuf.
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation)
     throws IOException {
@@ -1160,8 +1162,8 @@ public final class ProtobufUtil {
 
   /**
    * Code shared by {@link #toMutation(MutationType, Mutation)} and
-   * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd
-   * Mutation.
+   * {@link #toMutationNoData(MutationType, Mutation)}
+   * @return A partly-filled out protobuf'd Mutation.
    */
   private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
     final Mutation mutation, MutationProto.Builder builder) {
@@ -1266,7 +1268,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Result to a client Result
    * @param proto   the protocol buffer Result to convert
    * @param scanner Optional cell scanner.
-   * @return the converted client Result n
+   * @return the converted client Result
    */
   public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner)
     throws IOException {
@@ -1380,8 +1382,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer
-   * DeleteType n
+   * Convert a delete KeyValue type to protocol buffer DeleteType.
+   * @return protocol buffer DeleteType
    */
   public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
     switch (type) {
@@ -1401,7 +1403,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer DeleteType to delete KeyValue type.
    * @param type The DeleteType
-   * @return The type. n
+   * @return The type.
    */
   public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException {
     switch (type) {
@@ -1565,7 +1567,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with byte arrays
    * @param builder current message builder
-   * @param b       byte array n
+   * @param b       byte array
    */
   public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
     final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -1578,7 +1580,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with byte arrays
    * @param builder current message builder
-   * @param b       byte array nnn
+   * @param b       byte array
    */
   public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length)
     throws IOException {
@@ -1632,7 +1634,7 @@ public final class ProtobufUtil {
    *             magic and that is then followed by a protobuf that has a serialized
    *             {@link ServerName} in it.
    * @return Returns null if <code>data</code> is null else converts passed data to a ServerName
-   *         instance. n
+   *         instance.
    */
   public static ServerName toServerName(final byte[] data) throws DeserializationException {
     if (data == null || data.length <= 0) return null;
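
Several of the conversions above are inverses of one another; the Scan pair, for instance, round-trips (scan bounds are arbitrary):

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
  import org.apache.hadoop.hbase.util.Bytes;

  class ScanRoundTrip {
    static Scan roundTrip() throws IOException {
      Scan scan = new Scan().withStartRow(Bytes.toBytes("a")).withStopRow(Bytes.toBytes("b"));
      ClientProtos.Scan proto = ProtobufUtil.toScan(scan); // client -> pb
      return ProtobufUtil.toScan(proto);                   // pb -> client
    }
  }
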
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
index d63f28cdab8..155c721b98a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
@@ -34,8 +34,7 @@ public class LeaseException extends DoNotRetryIOException {
   }
 
   /**
-   * n
-   */
+   */
   public LeaseException(String message) {
     super(message);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
index 2e2a3a895ce..c0330034810 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
@@ -33,8 +33,7 @@ public class FailedLogCloseException extends IOException {
   }
 
   /**
-   * n
-   */
+   */
   public FailedLogCloseException(String msg) {
     super(msg);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
index feab0b07f2f..a2a43203b64 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
@@ -32,8 +32,7 @@ public class FailedSyncBeforeLogCloseException extends FailedLogCloseException {
   }
 
   /**
-   * n
-   */
+   */
   public FailedSyncBeforeLogCloseException(String msg) {
     super(msg);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
index 7be1ac630e1..b2bf6a4f536 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
@@ -50,7 +50,7 @@ public abstract class AbstractHBaseSaslRpcClient {
    * @param token           token to use if needed by the authentication method
    * @param serverAddr      the address of the hbase service
    * @param securityInfo    the security details for the remote hbase service
-   * @param fallbackAllowed does the client allow fallback to simple authentication n
+   * @param fallbackAllowed does the client allow fallback to simple authentication
    */
   protected AbstractHBaseSaslRpcClient(Configuration conf,
     SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
@@ -66,7 +66,7 @@ public abstract class AbstractHBaseSaslRpcClient {
    * @param serverAddr      the address of the hbase service
    * @param securityInfo    the security details for the remote hbase service
    * @param fallbackAllowed does the client allow fallback to simple authentication
-   * @param rpcProtection   the protection level ("authentication", "integrity" or "privacy") n
+   * @param rpcProtection   the protection level ("authentication", "integrity" or "privacy")
    */
   protected AbstractHBaseSaslRpcClient(Configuration conf,
     SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 5a816877ba8..6c755f9a94c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -62,7 +62,7 @@ public final class EncryptionUtil {
    * @param conf      configuration
    * @param key       the raw key bytes
    * @param algorithm the algorithm to use with this key material
-   * @return the encrypted key bytes n
+   * @return the encrypted key bytes
    */
   public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm)
     throws IOException {
@@ -115,7 +115,7 @@ public final class EncryptionUtil {
    * @param conf    configuration
    * @param subject subject key alias
    * @param value   the encrypted key bytes
-   * @return the raw key bytes nn
+   * @return the raw key bytes
    */
   public static Key unwrapKey(Configuration conf, String subject, byte[] value)
     throws IOException, KeyException {
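
wrapKey/unwrapKey only function against a configured key provider; a sketch under the assumption that hbase.crypto.keyprovider is set up and the subject alias resolves there (the alias and algorithm here are illustrative):

  import java.security.Key;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.security.EncryptionUtil;

  class KeyWrapSketch {
    // Both calls throw if conf does not point at a working KeyProvider.
    static byte[] wrap(Configuration conf, byte[] rawKey) throws Exception {
      return EncryptionUtil.wrapKey(conf, rawKey, "AES");
    }

    static Key unwrap(Configuration conf, byte[] wrapped) throws Exception {
      return EncryptionUtil.unwrapKey(conf, "hbase", wrapped);
    }
  }
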
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
index 93ad9245f65..0394bb0f2a3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
@@ -86,7 +86,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
    * Do client side SASL authentication with server via the given InputStream and OutputStream
    * @param inS  InputStream to use
    * @param outS OutputStream to use
-   * @return true if connection is set up, or false if needs to switch to simple Auth. n
+   * @return true if connection is set up, or false if needs to switch to simple Auth.
    */
   public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException {
     DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
@@ -185,7 +185,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
 
   /**
    * Get a SASL wrapped InputStream. Can be called only after saslConnect() has been called.
-   * @return a SASL wrapped InputStream n
+   * @return a SASL wrapped InputStream
    */
   public InputStream getInputStream() throws IOException {
     if (!saslClient.isComplete()) {
@@ -248,7 +248,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
 
   /**
    * Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called.
-   * @return a SASL wrapped OutputStream n
+   * @return a SASL wrapped OutputStream
    */
   public OutputStream getOutputStream() throws IOException {
     if (!saslClient.isComplete()) {
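
    A brief sketch of the call order these javadocs imply: saslConnect() runs the
    handshake first, and the wrapped streams are only valid afterwards. The helper
    below is hypothetical; socket and client setup are elided.

        import java.io.IOException;
        import java.io.InputStream;
        import java.io.OutputStream;
        import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;

        public final class SaslHandshakeSketch {
          static boolean handshake(HBaseSaslRpcClient client, InputStream sockIn,
              OutputStream sockOut) throws IOException {
            if (!client.saslConnect(sockIn, sockOut)) {
              return false; // server asked us to fall back to simple authentication
            }
            InputStream in = client.getInputStream();    // SASL-wrapped stream
            OutputStream out = client.getOutputStream(); // SASL-wrapped stream
            return true;
          }
        }
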
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
index c6c0c2ecde5..01cb8718bac 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
@@ -49,7 +49,7 @@ public class AccessControlClient {
   /**
    * Return true if authorization is supported and enabled
    * @param connection The connection to use
-   * @return true if authorization is supported and enabled, false otherwise n
+   * @return true if authorization is supported and enabled, false otherwise
    */
   public static boolean isAuthorizationEnabled(Connection connection) throws IOException {
     return connection.getAdmin().getSecurityCapabilities()
@@ -59,7 +59,7 @@ public class AccessControlClient {
   /**
    * Return true if cell authorization is supported and enabled
    * @param connection The connection to use
-   * @return true if cell authorization is supported and enabled, false otherwise n
+   * @return true if cell authorization is supported and enabled, false otherwise
    */
   public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException {
     return connection.getAdmin().getSecurityCapabilities()
@@ -74,9 +74,10 @@ public class AccessControlClient {
 
   /**
    * Grants permission on the specified table for the specified user
-   * @param connection The Connection instance to use nnnn * @param mergeExistingPermissions If set
-   *                   to false, later granted permissions will override previous granted
-   *                   permissions. otherwise, it'll merge with previous granted permissions. nn
+   * @param connection               The Connection instance to use
+   * @param mergeExistingPermissions If set to false, later granted permissions will override
+   *                                 previously granted permissions. Otherwise, it will be merged
+   *                                 with previously granted permissions.
    */
   private static void grant(Connection connection, final TableName tableName, final String userName,
     final byte[] family, final byte[] qual, boolean mergeExistingPermissions,
@@ -89,7 +90,7 @@ public class AccessControlClient {
   /**
    * Grants permission on the specified table for the specified user. If permissions for a specified
    * user exists, later granted permissions will override previous granted permissions.
-   * @param connection The Connection instance to use nnnnnn
+   * @param connection The Connection instance to use
    */
   public static void grant(Connection connection, final TableName tableName, final String userName,
     final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable {
@@ -97,9 +98,10 @@ public class AccessControlClient {
   }
 
   /**
-   * Grants permission on the specified namespace for the specified user. nnn * @param
-   * mergeExistingPermissions If set to false, later granted permissions will override previous
-   * granted permissions. otherwise, it'll merge with previous granted permissions. nn
+   * Grants permission on the specified namespace for the specified user.
+   * @param mergeExistingPermissions If set to false, later granted permissions will override
+   *                                 previously granted permissions. Otherwise, it will be merged
+   *                                 with previously granted permissions.
    */
   private static void grant(Connection connection, final String namespace, final String userName,
     boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable {
@@ -112,7 +114,7 @@ public class AccessControlClient {
    * Grants permission on the specified namespace for the specified user. If permissions on the
    * specified namespace exists, later granted permissions will override previous granted
    * permissions.
-   * @param connection The Connection instance to use nnnn
+   * @param connection The Connection instance to use
    */
   public static void grant(Connection connection, final String namespace, final String userName,
     final Permission.Action... actions) throws Throwable {
@@ -120,9 +122,10 @@ public class AccessControlClient {
   }
 
   /**
-   * Grant global permissions for the specified user. nn * @param mergeExistingPermissions If set to
-   * false, later granted permissions will override previous granted permissions. otherwise, it'll
-   * merge with previous granted permissions. nn
+   * Grant global permissions for the specified user.
+   * @param mergeExistingPermissions If set to false, later granted permissions will override
+   *                                 previously granted permissions. Otherwise, it will be merged
+   *                                 with previously granted permissions.
    */
   private static void grant(Connection connection, final String userName,
     boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable {
@@ -133,7 +136,7 @@ public class AccessControlClient {
 
   /**
    * Grant global permissions for the specified user. If permissions for the specified user exists,
-   * later granted permissions will override previous granted permissions. nnnn
+   * later granted permissions will override previously granted permissions.
    */
   public static void grant(Connection connection, final String userName,
     final Permission.Action... actions) throws Throwable {
@@ -149,7 +152,7 @@ public class AccessControlClient {
 
   /**
    * Revokes the permission on the table
-   * @param connection The Connection instance to use nnnnnn
+   * @param connection The Connection instance to use
    */
   public static void revoke(Connection connection, final TableName tableName, final String username,
     final byte[] family, final byte[] qualifier, final Permission.Action... actions)
@@ -160,7 +163,7 @@ public class AccessControlClient {
 
   /**
    * Revokes the permission on the namespace for the specified user.
-   * @param connection The Connection instance to use nnnn
+   * @param connection The Connection instance to use
    */
   public static void revoke(Connection connection, final String namespace, final String userName,
     final Permission.Action... actions) throws Throwable {
@@ -184,7 +187,7 @@ public class AccessControlClient {
    * along with the list of superusers would be returned. Else, no rows get returned.
    * @param connection The Connection instance to use
    * @param tableRegex The regular expression string to match against
-   * @return List of UserPermissions n
+   * @return List of UserPermissions
    */
   public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex)
     throws Throwable {
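
    For reference, a short example against the non-deprecated public surface shown
    above: grant READ on a column family, then list the permissions that match a
    table regex. Table, family and user names are invented for illustration.

        import java.util.List;
        import org.apache.hadoop.hbase.TableName;
        import org.apache.hadoop.hbase.client.Connection;
        import org.apache.hadoop.hbase.security.access.AccessControlClient;
        import org.apache.hadoop.hbase.security.access.Permission;
        import org.apache.hadoop.hbase.security.access.UserPermission;
        import org.apache.hadoop.hbase.util.Bytes;

        public final class GrantSketch {
          static void grantAndList(Connection conn) throws Throwable {
            // Grant READ on family "cf" of table "t1" to user "alice".
            AccessControlClient.grant(conn, TableName.valueOf("t1"), "alice",
              Bytes.toBytes("cf"), null, Permission.Action.READ);
            // List permissions on tables whose names match the regex "t1".
            List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "t1");
          }
        }
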
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
index 6496aaa0bb8..8e7a00d8ea4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
@@ -452,8 +452,8 @@ public class AccessControlUtil {
    * It's also called by the shell, in case you want to find references.
    * @param protocol      the AccessControlService protocol proxy
    * @param userShortName the short name of the user to grant permissions
-   * @param actions       the permissions to be granted n * @deprecated Use
-   *                      {@link Admin#grant(UserPermission, boolean)} instead.
+   * @param actions       the permissions to be granted
+   * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
    */
   @Deprecated
   public static void grant(RpcController controller,
@@ -480,8 +480,8 @@ public class AccessControlUtil {
    * @param tableName     optional table name
    * @param f             optional column family
    * @param q             optional qualifier
-   * @param actions       the permissions to be granted n * @deprecated Use
-   *                      {@link Admin#grant(UserPermission, boolean)} instead.
+   * @param actions       the permissions to be granted
+   * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
    */
   @Deprecated
   public static void grant(RpcController controller,
@@ -506,8 +506,8 @@ public class AccessControlUtil {
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
    * @param namespace  the short name of the user to grant permissions
-   * @param actions    the permissions to be granted n * @deprecated Use
-   *                   {@link Admin#grant(UserPermission, boolean)} instead.
+   * @param actions    the permissions to be granted
+   * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
    */
   @Deprecated
   public static void grant(RpcController controller,
@@ -623,9 +623,8 @@ public class AccessControlUtil {
    * A utility used to get user's global permissions based on the specified user name.
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
-   * @param userName   User name, if empty then all user permissions will be retrieved. n
-   *                   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
-   *                   instead.
+   * @param userName   User name, if empty then all user permissions will be retrieved.
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -653,8 +652,8 @@ public class AccessControlUtil {
    * It's also called by the shell, in case you want to find references.
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
-   * @param t          optional table name n * @deprecated Use
-   *                   {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+   * @param t          optional table name
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -670,9 +669,8 @@ public class AccessControlUtil {
    * @param t               optional table name
    * @param columnFamily    Column family
    * @param columnQualifier Column qualifier
-   * @param userName        User name, if empty then all user permissions will be retrieved. n
-   *                        * @deprecated Use
-   *                        {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+   * @param userName        User name, if empty then all user permissions will be retrieved.
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -710,8 +708,8 @@ public class AccessControlUtil {
    * It's also called by the shell, in case you want to find references.
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
-   * @param namespace  name of the namespace n * @deprecated Use
-   *                   {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+   * @param namespace  name of the namespace
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -724,9 +722,8 @@ public class AccessControlUtil {
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
    * @param namespace  name of the namespace
-   * @param userName   User name, if empty then all user permissions will be retrieved. n
-   *                   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
-   *                   instead.
+   * @param userName   User name, if empty then all user permissions will be retrieved.
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -764,8 +761,8 @@ public class AccessControlUtil {
    *                        will not be considered if columnFamily is passed as null or empty.
    * @param userName        User name, it shouldn't be null or empty.
    * @param actions         Actions
-   * @return true if access allowed, otherwise false n * @deprecated Use
-   *         {@link Admin#hasUserPermissions(String, List)} instead.
+   * @return true if access allowed, otherwise false
+   * @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead.
    */
   @Deprecated
   public static boolean hasPermission(RpcController controller,
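
    Since many of the methods touched here are deprecated in favour of the Admin
    API, a sketch of the replacement path the @deprecated tags point at may help.
    The table and user names are illustrative.

        import java.io.IOException;
        import org.apache.hadoop.hbase.TableName;
        import org.apache.hadoop.hbase.client.Admin;
        import org.apache.hadoop.hbase.security.access.Permission;
        import org.apache.hadoop.hbase.security.access.UserPermission;

        public final class AdminGrantSketch {
          static void grantViaAdmin(Admin admin) throws IOException {
            Permission perm = Permission.newBuilder(TableName.valueOf("t1"))
              .withActions(Permission.Action.READ).build();
            // true merges with, false replaces, previously granted permissions
            admin.grant(new UserPermission("alice", perm), true);
          }
        }
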
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
index cca5e8362ef..c08ce7c6cf7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
@@ -55,7 +55,7 @@ public class VisibilityClient {
   /**
    * Return true if cell visibility features are supported and enabled
    * @param connection The connection to use
-   * @return true if cell visibility features are supported and enabled, false otherwise n
+   * @return true if cell visibility features are supported and enabled, false otherwise
    */
   public static boolean isCellVisibilityEnabled(Connection connection) throws IOException {
     return connection.getAdmin().getSecurityCapabilities()
@@ -63,8 +63,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Utility method for adding label to the system. nnnn * @deprecated Use
-   * {@link #addLabel(Connection,String)} instead.
+   * Utility method for adding a label to the system.
+   * @deprecated Use {@link #addLabel(Connection,String)} instead.
    */
   @Deprecated
   public static VisibilityLabelsResponse addLabel(Configuration conf, final String label)
@@ -75,7 +75,7 @@ public class VisibilityClient {
   }
 
   /**
-   * Utility method for adding label to the system. nnnn
+   * Utility method for adding a label to the system.
    */
   public static VisibilityLabelsResponse addLabel(Connection connection, final String label)
     throws Throwable {
@@ -83,8 +83,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Utility method for adding labels to the system. nnnn * @deprecated Use
-   * {@link #addLabels(Connection,String[])} instead.
+   * Utility method for adding labels to the system.
+   * @deprecated Use {@link #addLabels(Connection,String[])} instead.
    */
   @Deprecated
   public static VisibilityLabelsResponse addLabels(Configuration conf, final String[] labels)
@@ -95,7 +95,7 @@ public class VisibilityClient {
   }
 
   /**
-   * Utility method for adding labels to the system. nnnn
+   * Utility method for adding labels to the system.
    */
   public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels)
     throws Throwable {
@@ -133,8 +133,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Sets given labels globally authorized for the user. nnnnn * @deprecated Use
-   * {@link #setAuths(Connection,String[],String)} instead.
+   * Sets given labels globally authorized for the user.
+   * @deprecated Use {@link #setAuths(Connection,String[],String)} instead.
    */
   @Deprecated
   public static VisibilityLabelsResponse setAuths(Configuration conf, final String[] auths,
@@ -145,7 +145,7 @@ public class VisibilityClient {
   }
 
   /**
-   * Sets given labels globally authorized for the user. nnnnn
+   * Sets given labels globally authorized for the user.
    */
   public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths,
     final String user) throws Throwable {
@@ -153,8 +153,8 @@ public class VisibilityClient {
   }
 
   /**
-   * nn * @return labels, the given user is globally authorized for. n * @deprecated Use
-   * {@link #getAuths(Connection,String)} instead.
+   * Returns the labels the given user is globally authorized for.
+   * @deprecated Use {@link #getAuths(Connection,String)} instead.
    */
   @Deprecated
   public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable {
@@ -164,8 +164,8 @@ public class VisibilityClient {
   }
 
   /**
-   * @param connection the Connection instance to use. n * @return labels, the given user is
-   *                   globally authorized for. n
+   * @param connection the Connection instance to use.
+   * @return the labels the given user is globally authorized for.
    */
   public static GetAuthsResponse getAuths(Connection connection, final String user)
     throws Throwable {
@@ -196,10 +196,10 @@ public class VisibilityClient {
   }
 
   /**
-   * Retrieve the list of visibility labels defined in the system. n * @param regex The regular
-   * expression to filter which labels are returned.
-   * @return labels The list of visibility labels defined in the system. n * @deprecated Use
-   *         {@link #listLabels(Connection,String)} instead.
+   * Retrieve the list of visibility labels defined in the system.
+   * @param regex The regular expression to filter which labels are returned.
+   * @return The list of visibility labels defined in the system.
+   * @deprecated Use {@link #listLabels(Connection,String)} instead.
    */
   @Deprecated
   public static ListLabelsResponse listLabels(Configuration conf, final String regex)
@@ -213,7 +213,7 @@ public class VisibilityClient {
    * Retrieve the list of visibility labels defined in the system.
    * @param connection The Connection instance to use.
    * @param regex      The regular expression to filter which labels are returned.
-   * @return labels The list of visibility labels defined in the system. n
+   * @return The list of visibility labels defined in the system.
    */
   public static ListLabelsResponse listLabels(Connection connection, final String regex)
     throws Throwable {
@@ -249,8 +249,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Removes given labels from user's globally authorized list of labels. nnnnn * @deprecated Use
-   * {@link #clearAuths(Connection,String[],String)} instead.
+   * Removes given labels from user's globally authorized list of labels.
+   * @deprecated Use {@link #clearAuths(Connection,String[],String)} instead.
    */
   @Deprecated
   public static VisibilityLabelsResponse clearAuths(Configuration conf, final String[] auths,
@@ -261,7 +261,7 @@ public class VisibilityClient {
   }
 
   /**
-   * Removes given labels from user's globally authorized list of labels. nnnnn
+   * Removes given labels from user's globally authorized list of labels.
    */
   public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths,
     final String user) throws Throwable {
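
    Taken together, the Connection-based overloads above compose as follows. The
    label and user names are invented, and passing a null regex to listLabels is
    assumed here to mean "no filter".

        import org.apache.hadoop.hbase.client.Connection;
        import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse;
        import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.ListLabelsResponse;
        import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

        public final class VisibilitySketch {
          static void labels(Connection conn) throws Throwable {
            // Define labels, then authorize user "bob" for one of them.
            VisibilityClient.addLabels(conn, new String[] { "secret", "confidential" });
            VisibilityClient.setAuths(conn, new String[] { "confidential" }, "bob");
            GetAuthsResponse auths = VisibilityClient.getAuths(conn, "bob");
            ListLabelsResponse all = VisibilityClient.listLabels(conn, null);
          }
        }
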
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 75e987591d9..2584216aa09 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -356,7 +356,7 @@ public final class ProtobufUtil {
    * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf
-   * references. n
+   * references.
    */
   public static IOException handleRemoteException(Exception e) {
     return makeIOExceptionOfException(e);
@@ -517,7 +517,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Get to a client Get
    * @param proto the protocol buffer Get to convert
-   * @return the converted client Get n
+   * @return the converted client Get
    */
   public static Get toGet(final ClientProtos.Get proto) throws IOException {
     if (proto == null) return null;
@@ -602,7 +602,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to a Put.
    * @param proto The protocol buffer MutationProto to convert
-   * @return A client Put. n
+   * @return A client Put.
    */
   public static Put toPut(final MutationProto proto) throws IOException {
     return toPut(proto, null);
@@ -612,7 +612,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Mutate to a Put.
    * @param proto       The protocol buffer MutationProto to convert
    * @param cellScanner If non-null, the Cell data that goes with this proto.
-   * @return A client Put. n
+   * @return A client Put.
    */
   public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -696,7 +696,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to a Delete
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted client Delete n
+   * @return the converted client Delete
    */
   public static Delete toDelete(final MutationProto proto) throws IOException {
     return toDelete(proto, null);
@@ -706,7 +706,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Mutate to a Delete
    * @param proto       the protocol buffer Mutate to convert
    * @param cellScanner if non-null, the data that goes with this delete.
-   * @return the converted client Delete n
+   * @return the converted client Delete
    */
   public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -836,9 +836,9 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer Mutate to an Append n * @param proto the protocol buffer Mutate to
-   * convert
-   * @return the converted client Append n
+   * Convert a protocol buffer Mutate to an Append
+   * @param proto the protocol buffer Mutate to convert
+   * @return the converted client Append
    */
   public static Append toAppend(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -856,7 +856,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to an Increment
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted client Increment n
+   * @return the converted client Increment
    */
   public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -875,7 +875,7 @@ public final class ProtobufUtil {
   /**
    * Convert a MutateRequest to Mutation
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted Mutation n
+   * @return the converted Mutation
    */
   public static Mutation toMutation(final MutationProto proto) throws IOException {
     MutationType type = proto.getMutateType();
@@ -923,7 +923,7 @@ public final class ProtobufUtil {
   /**
    * Convert a client Scan to a protocol buffer Scan
    * @param scan the client Scan to convert
-   * @return the converted protocol buffer Scan n
+   * @return the converted protocol buffer Scan
    */
   public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
     ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
@@ -1020,7 +1020,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Scan to a client Scan
    * @param proto the protocol buffer Scan to convert
-   * @return the converted client Scan n
+   * @return the converted client Scan
    */
   public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
     byte[] startRow = HConstants.EMPTY_START_ROW;
@@ -1145,7 +1145,7 @@ public final class ProtobufUtil {
   /**
    * Create a protocol buffer Get based on a client Get.
    * @param get the client Get
-   * @return a protocol buffer Get n
+   * @return a protocol buffer Get
    */
   public static ClientProtos.Get toGet(final Get get) throws IOException {
     ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
@@ -1211,7 +1211,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
+   * Create a protocol buffer Mutate based on a client Mutation
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutation(final MutationType type, final Mutation mutation,
     final long nonce) throws IOException {
@@ -1260,8 +1261,8 @@ public final class ProtobufUtil {
 
   /**
    * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
-   * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a
-   * protobuf'd Mutation n
+   * Understanding is that the Cell will be transported other than via protobuf.
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation,
     final MutationProto.Builder builder) throws IOException {
@@ -1270,8 +1271,8 @@ public final class ProtobufUtil {
 
   /**
    * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
-   * Understanding is that the Cell will be transported other than via protobuf. nn * @return a
-   * protobuf'd Mutation n
+   * Understanding is that the Cell will be transported other than via protobuf.
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation)
     throws IOException {
@@ -1297,8 +1298,8 @@ public final class ProtobufUtil {
 
   /**
    * Code shared by {@link #toMutation(MutationType, Mutation)} and
-   * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd
-   * Mutation.
+   * {@link #toMutationNoData(MutationType, Mutation)}
+   * @return A partly-filled out protobuf'd Mutation.
    */
   private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
     final Mutation mutation, MutationProto.Builder builder) {
@@ -1431,7 +1432,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Result to a client Result
    * @param proto   the protocol buffer Result to convert
    * @param scanner Optional cell scanner.
-   * @return the converted client Result n
+   * @return the converted client Result
    */
   public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner)
     throws IOException {
@@ -1546,8 +1547,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer
-   * DeleteType n
+   * Convert a delete KeyValue type to protocol buffer DeleteType.
+   * @return protocol buffer DeleteType
    */
   public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
     switch (type) {
@@ -1567,7 +1568,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer DeleteType to delete KeyValue type.
    * @param type The DeleteType
-   * @return The type. n
+   * @return The type.
    */
   public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException {
     switch (type) {
@@ -1658,7 +1659,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to close a region given a region name using admin protocol. nnn
+   * A helper to close a region given a region name using admin protocol.
    */
   public static void closeRegion(final RpcController controller,
     final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName)
@@ -1673,7 +1674,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to warmup a region given a region name using admin protocol nn *
+   * A helper to warmup a region given a region name using admin protocol.
    */
   public static void warmupRegion(final RpcController controller,
     final AdminService.BlockingInterface admin,
@@ -1690,7 +1691,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to open a region using admin protocol. nnn
+   * A helper to open a region using admin protocol.
    */
   public static void openRegion(final RpcController controller,
     final AdminService.BlockingInterface admin, ServerName server,
@@ -1704,8 +1705,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to get the all the online regions on a region server using admin protocol. n * @return
-   * a list of online region info n
+   * A helper to get all the online regions on a region server using admin protocol.
+   * @return a list of online region info
    */
   public static List<org.apache.hadoop.hbase.client.RegionInfo>
     getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException {
@@ -2041,7 +2042,8 @@ public final class ProtobufUtil {
   /**
    * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to
    * NOT print out data both because it can be big but also so we do not have data in our logs. Use
-   * judiciously. n * @return toString of passed <code>m</code>
+   * judiciously.
+   * @return toString of passed <code>m</code>
    */
   public static String getShortTextFormat(Message m) {
     if (m == null) return "null";
@@ -2188,8 +2190,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
-   * client CellVisibility
+   * Convert a protocol buffer CellVisibility to a client CellVisibility
+   * @return the converted client CellVisibility
    */
   public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
     if (proto == null) return null;
@@ -2197,8 +2199,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
-   * converted client CellVisibility n
+   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+   * @return the converted client CellVisibility
    */
   public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
     if (protoBytes == null) return null;
@@ -2214,8 +2216,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
-   * protocol buffer CellVisibility
+   * Create a protocol buffer CellVisibility based on a client CellVisibility.
+   * @return a protocol buffer CellVisibility
    */
   public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
     ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@@ -2224,8 +2226,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer Authorizations to a client Authorizations n * @return the converted
-   * client Authorizations
+   * Convert a protocol buffer Authorizations to a client Authorizations
+   * @return the converted client Authorizations
    */
   public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) {
     if (proto == null) return null;
@@ -2233,8 +2235,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer Authorizations bytes to a client Authorizations n * @return the
-   * converted client Authorizations n
+   * Convert a protocol buffer Authorizations bytes to a client Authorizations
+   * @return the converted client Authorizations
    */
   public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException {
     if (protoBytes == null) return null;
@@ -2250,8 +2252,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a protocol buffer Authorizations based on a client Authorizations. n * @return a
-   * protocol buffer Authorizations
+   * Create a protocol buffer Authorizations based on a client Authorizations.
+   * @return a protocol buffer Authorizations
    */
   public static ClientProtos.Authorizations toAuthorizations(Authorizations authorizations) {
     ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder();
@@ -2262,8 +2264,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer TimeUnit to a client TimeUnit n * @return the converted client
-   * TimeUnit
+   * Convert a protocol buffer TimeUnit to a client TimeUnit
+   * @return the converted client TimeUnit
    */
   public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) {
     switch (proto) {
@@ -2286,8 +2288,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a client TimeUnit to a protocol buffer TimeUnit n * @return the converted protocol
-   * buffer TimeUnit
+   * Convert a client TimeUnit to a protocol buffer TimeUnit
+   * @return the converted protocol buffer TimeUnit
    */
   public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) {
     switch (timeUnit) {
@@ -2310,8 +2312,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer ThrottleType to a client ThrottleType n * @return the converted
-   * client ThrottleType
+   * Convert a protocol buffer ThrottleType to a client ThrottleType
+   * @return the converted client ThrottleType
    */
   public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) {
     switch (proto) {
@@ -2339,8 +2341,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a client ThrottleType to a protocol buffer ThrottleType n * @return the converted
-   * protocol buffer ThrottleType
+   * Convert a client ThrottleType to a protocol buffer ThrottleType
+   * @return the converted protocol buffer ThrottleType
    */
   public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) {
     switch (type) {
@@ -2368,8 +2370,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer QuotaScope to a client QuotaScope n * @return the converted client
-   * QuotaScope
+   * Convert a protocol buffer QuotaScope to a client QuotaScope
+   * @return the converted client QuotaScope
    */
   public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) {
     switch (proto) {
@@ -2382,8 +2384,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a client QuotaScope to a protocol buffer QuotaScope n * @return the converted protocol
-   * buffer QuotaScope
+   * Convert a client QuotaScope to a protocol buffer QuotaScope
+   * @return the converted protocol buffer QuotaScope
    */
   public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) {
     switch (scope) {
@@ -2396,8 +2398,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer QuotaType to a client QuotaType n * @return the converted client
-   * QuotaType
+   * Convert a protocol buffer QuotaType to a client QuotaType
+   * @return the converted client QuotaType
    */
   public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) {
     switch (proto) {
@@ -2410,8 +2412,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a client QuotaType to a protocol buffer QuotaType n * @return the converted protocol
-   * buffer QuotaType
+   * Convert a client QuotaType to a protocol buffer QuotaType
+   * @return the converted protocol buffer QuotaType
    */
   public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) {
     switch (type) {
@@ -2538,7 +2540,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeDelimitedFrom avoid the hard-coded 64MB limit for decoding
    * buffers
    * @param builder current message builder
-   * @param in      Inputsream with delimited protobuf data n
+   * @param in      InputStream with delimited protobuf data
    */
   public static void mergeDelimitedFrom(Message.Builder builder, InputStream in)
     throws IOException {
@@ -2560,7 +2562,7 @@ public final class ProtobufUtil {
    * where the message size is known
    * @param builder current message builder
    * @param in      InputStream containing protobuf data
-   * @param size    known size of protobuf data n
+   * @param size    known size of protobuf data
    */
   public static void mergeFrom(Message.Builder builder, InputStream in, int size)
     throws IOException {
@@ -2574,7 +2576,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers
    * where the message size is not known
    * @param builder current message builder
-   * @param in      InputStream containing protobuf data n
+   * @param in      InputStream containing protobuf data
    */
   public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException {
     final CodedInputStream codedInput = CodedInputStream.newInstance(in);
@@ -2587,7 +2589,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with ByteStrings
    * @param builder current message builder
-   * @param bs      ByteString containing the n
+   * @param bs      ByteString containing the protobuf data
    */
   public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException {
     final CodedInputStream codedInput = bs.newCodedInput();
@@ -2600,7 +2602,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with byte arrays
    * @param builder current message builder
-   * @param b       byte array n
+   * @param b       byte array
    */
   public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
     final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -2613,7 +2615,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with byte arrays
    * @param builder current message builder
-   * @param b       byte array nnn
+   * @param b       byte array
    */
   public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length)
     throws IOException {
@@ -2782,7 +2784,7 @@ public final class ProtobufUtil {
 
   /**
    * Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state
-   * @param state the protobuf CompactionState n
+   * @param state the protobuf CompactionState
    */
   public static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) {
     return CompactionState.valueOf(state.toString());
@@ -2794,7 +2796,7 @@ public final class ProtobufUtil {
 
   /**
    * Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state
-   * @param state the protobuf CompactionState n
+   * @param state the protobuf CompactionState
    */
   public static CompactionState
     createCompactionStateForRegionLoad(RegionLoad.CompactionState state) {
@@ -2899,9 +2901,7 @@ public final class ProtobufUtil {
       stats.getCompactionPressure());
   }
 
-  /**
-   * n * @return A String version of the passed in <code>msg</code>
-   */
+  /** Returns a String version of the passed in <code>msg</code> */
   public static String toText(Message msg) {
     return TextFormat.shortDebugString(msg);
   }
@@ -2911,7 +2911,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. n
+   * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it.
    */
   public static <T> T call(Callable<T> callable) throws IOException {
     try {
@@ -3022,7 +3022,7 @@ public final class ProtobufUtil {
    *             magic and that is then followed by a protobuf that has a serialized
    *             {@link ServerName} in it.
    * @return Returns null if <code>data</code> is null else converts passed data to a ServerName
-   *         instance. n
+   *         instance.
    */
   public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException {
     if (data == null || data.length <= 0) return null;
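
    As a worked example of the converters plus the size-limit-free merge methods
    documented above, a client Scan can be round-tripped through its protobuf
    form; this is a sketch, not code from the patch.

        import java.io.IOException;
        import org.apache.hadoop.hbase.client.Scan;
        import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
        import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

        public final class ScanRoundTripSketch {
          static Scan roundTrip(Scan scan) throws IOException {
            byte[] bytes = ProtobufUtil.toScan(scan).toByteArray();
            ClientProtos.Scan.Builder builder = ClientProtos.Scan.newBuilder();
            // mergeFrom avoids protobuf's hard-coded 64MB decode limit
            ProtobufUtil.mergeFrom(builder, bytes);
            return ProtobufUtil.toScan(builder.build());
          }
        }
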
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 89d8acceae6..925f03436e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -191,7 +191,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for a client increment nnnnnn * @return a mutate request
+   * Create a protocol buffer MutateRequest for a client increment
+   * @return a mutate request
    */
   public static MutateRequest buildIncrementRequest(final byte[] regionName, final byte[] row,
     final byte[] family, final byte[] qualifier, final long amount, final Durability durability,
@@ -225,7 +226,7 @@ public final class RequestConverter {
 
   /**
    * Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append
-   * @return a mutate request n
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row,
     final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value,
@@ -245,7 +246,7 @@ public final class RequestConverter {
 
   /**
    * Create a protocol buffer MultiRequest for conditioned row mutations
-   * @return a multi request n
+   * @return a multi request
    */
   public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName,
     final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op,
@@ -302,7 +303,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for a put nn * @return a mutate request n
+   * Create a protocol buffer MutateRequest for a put
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put)
     throws IOException {
@@ -314,7 +316,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for an append nn * @return a mutate request n
+   * Create a protocol buffer MutateRequest for an append
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append,
     long nonceGroup, long nonce) throws IOException {
@@ -330,7 +333,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for a client increment nn * @return a mutate request
+   * Create a protocol buffer MutateRequest for a client increment
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment,
     final long nonceGroup, final long nonce) throws IOException {
@@ -346,7 +350,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for a delete nn * @return a mutate request n
+   * Create a protocol buffer MutateRequest for a delete
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete)
     throws IOException {
@@ -366,7 +371,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer ScanRequest for a client Scan nnnn * @return a scan request n
+   * Create a protocol buffer ScanRequest for a client Scan
+   * @return a scan request
    */
   public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int numberOfRows,
     boolean closeScanner) throws IOException {
@@ -386,7 +392,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer ScanRequest for a scanner id nnn * @return a scan request
+   * Create a protocol buffer ScanRequest for a scanner id
+   * @return a scan request
    */
   public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
     boolean trackMetrics) {
@@ -401,7 +408,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer ScanRequest for a scanner id nnnn * @return a scan request
+   * Create a protocol buffer ScanRequest for a scanner id
+   * @return a scan request
    */
   public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
     long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) {
@@ -421,7 +429,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer bulk load request nnnnn * @return a bulk load request
+   * Create a protocol buffer bulk load request
+   * @return a bulk load request
    */
   public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
     final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -431,7 +440,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer bulk load request nnnnnn * @return a bulk load request
+   * Create a protocol buffer bulk load request
+   * @return a bulk load request
    */
   public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
     final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -488,7 +498,7 @@ public final class RequestConverter {
    * @param mutationBuilder     mutationBuilder to be used to build mutation.
    * @param nonceGroup          nonceGroup to be applied.
    * @param indexMap            Map of created RegionAction to the original index for a
-   *                            RowMutations/CheckAndMutate within the original list of actions n
+   *                            RowMutations/CheckAndMutate within the original list of actions
    */
   public static void buildRegionActions(final byte[] regionName, final List<Action> actions,
     final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder,
@@ -651,7 +661,7 @@ public final class RequestConverter {
    * @param mutationBuilder     mutationBuilder to be used to build mutation.
    * @param nonceGroup          nonceGroup to be applied.
    * @param indexMap            Map of created RegionAction to the original index for a
-   *                            RowMutations/CheckAndMutate within the original list of actions n
+   *                            RowMutations/CheckAndMutate within the original list of actions
    */
   public static void buildNoDataRegionActions(final byte[] regionName,
     final Iterable<Action> actions, final List<CellScannable> cells,
@@ -1018,7 +1028,8 @@ public final class RequestConverter {
   /**
    * Create a CompactRegionRequest for a given region name
    * @param regionName the name of the region to get info
-   * @param major      indicator if it is a major compaction n * @return a CompactRegionRequest
+   * @param major      indicator if it is a major compaction
+   * @return a CompactRegionRequest
    */
   public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
     byte[] columnFamily) {
@@ -1076,7 +1087,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer AddColumnRequest nn * @return an AddColumnRequest
+   * Create a protocol buffer AddColumnRequest
+   * @return an AddColumnRequest
    */
   public static AddColumnRequest buildAddColumnRequest(final TableName tableName,
     final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -1089,7 +1101,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer DeleteColumnRequest nn * @return a DeleteColumnRequest
+   * Create a protocol buffer DeleteColumnRequest
+   * @return a DeleteColumnRequest
    */
   public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName,
     final byte[] columnName, final long nonceGroup, final long nonce) {
@@ -1102,7 +1115,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer ModifyColumnRequest nn * @return an ModifyColumnRequest
+   * Create a protocol buffer ModifyColumnRequest
+   * @return a ModifyColumnRequest
    */
   public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName,
     final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -1115,7 +1129,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MoveRegionRequest nn * @return A MoveRegionRequest
+   * Create a protocol buffer MoveRegionRequest
+   * @return A MoveRegionRequest
    */
   public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
     ServerName destServerName) {
@@ -1156,7 +1171,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer AssignRegionRequest n * @return an AssignRegionRequest
+   * Create a protocol buffer AssignRegionRequest
+   * @return an AssignRegionRequest
    */
   public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) {
     AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder();
@@ -1165,7 +1181,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer UnassignRegionRequest n * @return an UnassignRegionRequest
+   * Creates a protocol buffer UnassignRegionRequest
+   * @return an UnassignRegionRequest
    */
   public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) {
     UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
@@ -1174,7 +1191,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer OfflineRegionRequest n * @return an OfflineRegionRequest
+   * Creates a protocol buffer OfflineRegionRequest
+   * @return an OfflineRegionRequest
    */
   public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) {
     OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder();
@@ -1183,7 +1201,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest
+   * Creates a protocol buffer DeleteTableRequest
+   * @return a DeleteTableRequest
    */
   public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName,
     final long nonceGroup, final long nonce) {
@@ -1211,7 +1230,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer EnableTableRequest n * @return an EnableTableRequest
+   * Creates a protocol buffer EnableTableRequest
+   * @return an EnableTableRequest
    */
   public static EnableTableRequest buildEnableTableRequest(final TableName tableName,
     final long nonceGroup, final long nonce) {
@@ -1223,7 +1243,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer DisableTableRequest n * @return a DisableTableRequest
+   * Creates a protocol buffer DisableTableRequest
+   * @return a DisableTableRequest
    */
   public static DisableTableRequest buildDisableTableRequest(final TableName tableName,
     final long nonceGroup, final long nonce) {
@@ -1235,7 +1256,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer CreateTableRequest nn * @return a CreateTableRequest
+   * Creates a protocol buffer CreateTableRequest
+   * @return a CreateTableRequest
    */
   public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor,
     final byte[][] splitKeys, final long nonceGroup, final long nonce) {
@@ -1252,7 +1274,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer ModifyTableRequest nn * @return a ModifyTableRequest
+   * Creates a protocol buffer ModifyTableRequest
+   * @return a ModifyTableRequest
    */
   public static ModifyTableRequest buildModifyTableRequest(final TableName tableName,
     final TableDescriptor tableDesc, final long nonceGroup, final long nonce) {
@@ -1265,7 +1288,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer GetSchemaAlterStatusRequest n * @return a GetSchemaAlterStatusRequest
+   * Creates a protocol buffer GetSchemaAlterStatusRequest
+   * @return a GetSchemaAlterStatusRequest
    */
   public static GetSchemaAlterStatusRequest
     buildGetSchemaAlterStatusRequest(final TableName tableName) {
@@ -1275,7 +1299,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer GetTableDescriptorsRequest n * @return a GetTableDescriptorsRequest
+   * Creates a protocol buffer GetTableDescriptorsRequest
+   * @return a GetTableDescriptorsRequest
    */
   public static GetTableDescriptorsRequest
     buildGetTableDescriptorsRequest(final List<TableName> tableNames) {
@@ -1390,7 +1415,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer SetBalancerRunningRequest nn * @return a SetBalancerRunningRequest
+   * Creates a protocol buffer SetBalancerRunningRequest
+   * @return a SetBalancerRunningRequest
    */
   public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on,
     boolean synchronous) {
@@ -1475,8 +1501,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a request for querying the master the last flushed sequence Id for a region n * @return
-   * A {@link GetLastFlushedSequenceIdRequest}
+   * Creates a request for querying the master for the last flushed sequence Id of a region
+   * @return A {@link GetLastFlushedSequenceIdRequest}
    */
   public static GetLastFlushedSequenceIdRequest
     buildGetLastFlushedSequenceIdRequest(byte[] regionName) {
@@ -1527,7 +1553,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer SetNormalizerRunningRequest n * @return a SetNormalizerRunningRequest
+   * Creates a protocol buffer SetNormalizerRunningRequest
+   * @return a SetNormalizerRunningRequest
    */
   public static SetNormalizerRunningRequest buildSetNormalizerRunningRequest(boolean on) {
     return SetNormalizerRunningRequest.newBuilder().setOn(on).build();
@@ -1635,7 +1662,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer CreateNamespaceRequest n * @return a CreateNamespaceRequest
+   * Creates a protocol buffer CreateNamespaceRequest
+   * @return a CreateNamespaceRequest
    */
   public static CreateNamespaceRequest
     buildCreateNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1645,7 +1673,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer ModifyNamespaceRequest n * @return a ModifyNamespaceRequest
+   * Creates a protocol buffer ModifyNamespaceRequest
+   * @return a ModifyNamespaceRequest
    */
   public static ModifyNamespaceRequest
     buildModifyNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1655,7 +1684,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer DeleteNamespaceRequest n * @return a DeleteNamespaceRequest
+   * Creates a protocol buffer DeleteNamespaceRequest
+   * @return a DeleteNamespaceRequest
    */
   public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String name) {
     DeleteNamespaceRequest.Builder builder = DeleteNamespaceRequest.newBuilder();
@@ -1664,8 +1694,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer GetNamespaceDescriptorRequest n * @return a
-   * GetNamespaceDescriptorRequest
+   * Creates a protocol buffer GetNamespaceDescriptorRequest
+   * @return a GetNamespaceDescriptorRequest
    */
   public static GetNamespaceDescriptorRequest
     buildGetNamespaceDescriptorRequest(final String name) {
@@ -1782,7 +1812,7 @@ public final class RequestConverter {
 
   /**
    * Creates IsSnapshotCleanupEnabledRequest to determine if auto snapshot cleanup based on TTL
-   * expiration is turned on n
+   * expiration is turned on
    */
   public static IsSnapshotCleanupEnabledRequest buildIsSnapshotCleanupEnabledRequest() {
     return IsSnapshotCleanupEnabledRequest.newBuilder().build();
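
As a quick illustration of how these request builders are driven, a minimal sketch; the table name and nonce values are placeholders, and the shaded MasterProtos generated types are assumed to be on the classpath:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;

    public class RequestConverterSketch {
      public static void main(String[] args) {
        // The nonce pair lets the master de-duplicate the operation on client retry.
        long nonceGroup = 1L; // illustrative values only
        long nonce = 42L;
        EnableTableRequest req = RequestConverter
          .buildEnableTableRequest(TableName.valueOf("test_table"), nonceGroup, nonce);
        System.out.println(req.getTableName().getQualifier().toStringUtf8()); // test_table
      }
    }
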
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index 0a81db1cfb8..180698864fd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -90,7 +90,7 @@ public final class ResponseConverter {
    * @param request  the original protocol buffer MultiRequest
    * @param response the protocol buffer MultiResponse to convert
    * @param cells    Cells to go with the passed in <code>proto</code>. Can be null.
-   * @return the results that were in the MultiResponse (a Result or an Exception). n
+   * @return the results that were in the MultiResponse (a Result or an Exception).
    */
   public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
     final MultiResponse response, final CellScanner cells) throws IOException {
@@ -103,7 +103,7 @@ public final class ResponseConverter {
    * @param indexMap Used to support RowMutations/CheckAndMutate in batch
    * @param response the protocol buffer MultiResponse to convert
    * @param cells    Cells to go with the passed in <code>proto</code>. Can be null.
-   * @return the results that were in the MultiResponse (a Result or an Exception). n
+   * @return the results that were in the MultiResponse (a Result or an Exception).
    */
   public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
     final Map<Integer, Integer> indexMap, final MultiResponse response, final CellScanner cells)
@@ -265,7 +265,8 @@ public final class ResponseConverter {
   }
 
   /**
-   * Wrap a throwable to an action result. n * @return an action result builder
+   * Wrap a throwable to an action result.
+   * @return an action result builder
    */
   public static ResultOrException.Builder buildActionResult(final Throwable t) {
     ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -274,7 +275,8 @@ public final class ResponseConverter {
   }
 
   /**
-   * Wrap a throwable to an action result. n * @return an action result builder
+   * Wrap a throwable to an action result.
+   * @return an action result builder
    */
   public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) {
     ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -282,9 +284,7 @@ public final class ResponseConverter {
     return builder;
   }
 
-  /**
-   * n * @return NameValuePair of the exception name to stringified version os exception.
-   */
+  /** Returns NameValuePair of the exception name to a stringified version of the exception. */
   public static NameBytesPair buildException(final Throwable t) {
     NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
     parameterBuilder.setName(t.getClass().getName());
@@ -325,7 +325,8 @@ public final class ResponseConverter {
   }
 
   /**
-   * A utility to build a GetServerInfoResponse. nn * @return the response
+   * A utility to build a GetServerInfoResponse.
+   * @return the response
    */
   public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName,
     final int webuiPort) {
@@ -340,7 +341,8 @@ public final class ResponseConverter {
   }
 
   /**
-   * A utility to build a GetOnlineRegionResponse. n * @return the response
+   * A utility to build a GetOnlineRegionResponse.
+   * @return the response
    */
   public static GetOnlineRegionResponse
     buildGetOnlineRegionResponse(final List<RegionInfo> regions) {
@@ -424,7 +426,7 @@ public final class ResponseConverter {
   }
 
   /**
-   * Create Results from the cells using the cells meta data. nnn
+   * Create Results from the cells using the cells meta data.
    */
   public static Result[] getResults(CellScanner cellScanner, ScanResponse response)
     throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
index 7ebbbf44ceb..9a964666e1d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
@@ -90,7 +90,7 @@ public class Writables {
    * @return The passed Writable after its readFields has been called fed by the passed
    *         <code>bytes</code> array or IllegalArgumentException if passed null or an empty
    *         <code>bytes</code> array.
-   * @throws IOException e n
+   * @throws IOException e
    */
   public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException {
     return getWritable(bytes, 0, bytes.length, w);
@@ -106,7 +106,7 @@ public class Writables {
    * @return The passed Writable after its readFields has been called fed by the passed
    *         <code>bytes</code> array or IllegalArgumentException if passed null or an empty
    *         <code>bytes</code> array.
-   * @throws IOException e n
+   * @throws IOException e
    */
   public static Writable getWritable(final byte[] bytes, final int offset, final int length,
     final Writable w) throws IOException {
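
A round-trip sketch for getWritable; it assumes Writables.getBytes is available as the serializing counterpart:

    import org.apache.hadoop.hbase.util.Writables;
    import org.apache.hadoop.io.IntWritable;

    public class WritablesSketch {
      public static void main(String[] args) throws Exception {
        // Serialize, then feed the bytes back through readFields via getWritable.
        byte[] bytes = Writables.getBytes(new IntWritable(7));
        IntWritable copy = (IntWritable) Writables.getWritable(bytes, new IntWritable());
        System.out.println(copy.get()); // 7
      }
    }
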
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
index c476935a950..f7b9830c6f6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
@@ -147,7 +147,7 @@ public class ZNodePaths {
 
   /**
    * Parses the meta replicaId from the passed path.
-   * @param path the name of the full path which includes baseZNode. n
+   * @param path the name of the full path which includes baseZNode.
    */
   public int getMetaReplicaIdFromPath(String path) {
     // Extract the znode from path. The prefix is of the following format.
@@ -158,7 +158,7 @@ public class ZNodePaths {
 
   /**
    * Parse the meta replicaId from the passed znode
-   * @param znode the name of the znode, does not include baseZNode n
+   * @param znode the name of the znode, does not include baseZNode
    */
   public int getMetaReplicaIdFromZNode(String znode) {
     return znode.equals(metaZNodePrefix)
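
A sketch of the replicaId parsing, assuming the default meta znode prefix "meta-region-server":

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

    public class ZNodePathsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        ZNodePaths paths = new ZNodePaths(conf);
        // The unsuffixed znode is the default meta replica (id 0);
        // a "-N" suffix names read replica N.
        System.out.println(paths.getMetaReplicaIdFromZNode("meta-region-server"));   // 0
        System.out.println(paths.getMetaReplicaIdFromZNode("meta-region-server-1")); // 1
      }
    }
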
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index a3c9a2c429a..3b91708af6a 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -30,6 +30,7 @@ import java.util.Random;
 import java.util.SortedMap;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -148,7 +149,7 @@ public class TestClientNoCluster extends Configured implements Tool {
   }
 
   /**
-   * Remove the @Ignore to try out timeout and retry asettings n
+   * Remove the @Ignore to try out timeout and retry settings
    */
   @Ignore
   @Test
@@ -179,7 +180,7 @@ public class TestClientNoCluster extends Configured implements Tool {
   }
 
   /**
-   * Test that operation timeout prevails over rpc default timeout and retries, etc. n
+   * Test that operation timeout prevails over rpc default timeout and retries, etc.
    */
   @Test
   public void testRpcTimeout() throws IOException {
@@ -560,8 +561,8 @@ public class TestClientNoCluster extends Configured implements Tool {
   }
 
   /**
-   * @param name region name or encoded region name. n * @return True if we are dealing with a
-   *             hbase:meta region.
+   * @param name region name or encoded region name.
+   * @return True if we are dealing with a hbase:meta region.
    */
   static boolean isMetaRegion(final byte[] name, final RegionSpecifierType type) {
     switch (type) {
@@ -616,8 +617,9 @@ public class TestClientNoCluster extends Configured implements Tool {
 
   /**
    * Format passed integer. Zero-pad. Copied from hbase-server PE class and small amendment. Make
-   * them share. n * @return Returns zero-prefixed 10-byte wide decimal version of passed number
-   * (Does absolute in case number is negative).
+   * them share.
+   * @return Returns zero-prefixed 10-byte wide decimal version of passed number (Does absolute in
+   *         case number is negative).
    */
   private static byte[] format(final long number) {
     byte[] b = new byte[10];
@@ -629,9 +631,7 @@ public class TestClientNoCluster extends Configured implements Tool {
     return b;
   }
 
-  /**
-   * nn * @return <code>count</code> regions
-   */
+  /** Returns <code>count</code> regions */
   private static HRegionInfo[] makeHRegionInfos(final byte[] tableName, final int count,
     final long namespaceSpan) {
     byte[] startKey = HConstants.EMPTY_BYTE_ARRAY;
@@ -651,9 +651,7 @@ public class TestClientNoCluster extends Configured implements Tool {
     return hris;
   }
 
-  /**
-   * n * @return Return <code>count</code> servernames.
-   */
+  /** Returns <code>count</code> servernames. */
   private static ServerName[] makeServerNames(final int count) {
     ServerName[] sns = new ServerName[count];
     for (int i = 0; i < count; i++) {
@@ -696,7 +694,7 @@ public class TestClientNoCluster extends Configured implements Tool {
   }
 
   /**
-   * Code for each 'client' to run. nnnn
+   * Code for each 'client' to run.
    */
   static void cycle(int id, final Configuration c, final Connection sharedConnection)
     throws IOException {
@@ -819,7 +817,7 @@ public class TestClientNoCluster extends Configured implements Tool {
 
   /**
    * Run a client instance against a faked up server.
-   * @param args TODO n
+   * @param args TODO
    */
   public static void main(String[] args) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new TestClientNoCluster(), args));
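
The format helper above is private; as a standalone illustration of the described behavior (zero-prefixed, 10-byte-wide decimal of the absolute value), a re-implementation:

    import org.apache.hadoop.hbase.util.Bytes;

    public class FormatSketch {
      // Re-implementation of the private format() documented above.
      static byte[] format(final long number) {
        byte[] b = new byte[10];
        long d = Math.abs(number);
        for (int i = b.length - 1; i >= 0; i--) {
          b[i] = (byte) ((d % 10) + '0');
          d /= 10;
        }
        return b;
      }

      public static void main(String[] args) {
        System.out.println(Bytes.toString(format(-123)));        // 0000000123
        System.out.println(Bytes.toString(format(9876543210L))); // 9876543210
      }
    }
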
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
index cc329cd3d03..cce3ba4e4e3 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
@@ -42,7 +42,7 @@ public class TestDeleteTimeStamp {
   private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier");
 
   /*
-   * Test for verifying that the timestamp in delete object is being honored. n
+   * Test for verifying that the timestamp in delete object is being honored.
    */
   @Test
   public void testTimeStamp() {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
index 1dfd8f2d1d1..3a87c56af56 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
@@ -61,7 +61,7 @@ public class TestSnapshotFromAdmin {
 
   /**
    * Test that the logic for doing 'correct' back-off based on exponential increase and the max-time
-   * passed from the server ensures the correct overall waiting for the snapshot to finish. n
+   * passed from the server ensures the correct overall waiting for the snapshot to finish.
    */
   @Test
   public void testBackoffLogic() throws Exception {
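
To make the backoff under test concrete, an illustrative capped exponential wait (not the actual HBase implementation):

    public class BackoffSketch {
      public static void main(String[] args) {
        long maxPauseMs = 3000; // cap the server advertises for snapshot completion
        long basePauseMs = 100;
        long total = 0;
        for (int attempt = 0; attempt < 6; attempt++) {
          // Double the wait each retry, but never exceed the server's max.
          long wait = Math.min(basePauseMs << attempt, maxPauseMs);
          total += wait;
          System.out.println("attempt " + attempt + " waits " + wait + " ms");
        }
        System.out.println("total " + total + " ms");
      }
    }
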
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index 707391ef173..a406437987e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -95,7 +95,8 @@ public final class AuthUtil {
   /**
    * For kerberized cluster, return login user (from kinit or from keytab if specified). For
    * non-kerberized cluster, return system user.
-   * @param conf configuartion file n * @throws IOException login exception
+   * @param conf configuration file
+   * @throws IOException login exception
    */
   @InterfaceAudience.Private
   public static User loginClient(Configuration conf) throws IOException {
@@ -155,7 +156,8 @@ public final class AuthUtil {
    * <p>
    * NOT recommended to use this method unless you're sure what you're doing; it is for canary only.
    * Please use User#loginClient.
-   * @param conf configuration file n * @throws IOException login exception
+   * @param conf configuration file
+   * @throws IOException login exception
    */
   private static User loginClientAsService(Configuration conf) throws IOException {
     UserProvider provider = UserProvider.instantiate(conf);
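
A sketch of the client login path described above; on a non-kerberized cluster this simply resolves to the system user:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.AuthUtil;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.User;

    public class LoginSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Kerberized: picks up the kinit ticket or configured keytab.
        // Non-kerberized: falls back to the OS login user.
        User user = AuthUtil.loginClient(conf);
        System.out.println(user.getName());
      }
    }
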
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
index e5050b864ca..a29a98a8c09 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
@@ -54,7 +54,7 @@ public class ByteBufferKeyOnlyKeyValue extends ByteBufferExtendedCell {
 
   /**
    * A setter that helps to avoid object creation every time and whenever there is a need to create
-   * new OffheapKeyOnlyKeyValue. nnn
+   * new OffheapKeyOnlyKeyValue.
    */
   public void setKey(ByteBuffer key, int offset, int length) {
     setKey(key, offset, length, ByteBufferUtils.toShort(key, offset));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
index 28128ee37c6..677ed2295ce 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
@@ -48,7 +48,7 @@ public interface CellBuilder {
   Cell build();
 
   /**
-   * Remove all internal elements from builder. n
+   * Remove all internal elements from builder.
    */
   CellBuilder clear();
 }
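
A sketch of the CellBuilder lifecycle, including the clear() documented above; CellBuilderFactory and CellBuilderType are assumed from the 2.x public API:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilder;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellBuilderSketch {
      public static void main(String[] args) {
        CellBuilder builder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
        Cell cell = builder.setRow(Bytes.toBytes("row1"))
          .setFamily(Bytes.toBytes("f"))
          .setQualifier(Bytes.toBytes("q"))
          .setType(Cell.Type.Put)
          .setValue(Bytes.toBytes("v"))
          .build();
        // clear() drops all internal elements so the builder can be reused.
        builder.clear();
        System.out.println(cell);
      }
    }
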
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index 5992b8b404b..426cf7c9395 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -663,9 +663,8 @@ public class CellComparatorImpl implements CellComparator {
   /**
    * Compares the row part of the cell with a simple plain byte[] like the stopRow in Scan. This
    * should be used with context where for hbase:meta cells the
-   * {{@link MetaCellComparator#META_COMPARATOR} should be used n * the cell to be compared n * the
-   * kv serialized byte[] to be compared with n * the offset in the byte[] n * the length in the
-   * byte[]
+   * {@link MetaCellComparator#META_COMPARATOR} should be used: the cell to be compared, the kv
+   * serialized byte[] to be compared with, the offset in the byte[] and the length in the byte[].
    * @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger than byte[], -1
    *         otherwise
    */
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 604aa1dff0d..83e0c1c7fe7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -138,7 +138,8 @@ public final class CellUtil {
   /**
    * Returns tag value in a new byte array. If server-side, use {@link Tag#getValueArray()} with
    * appropriate {@link Tag#getValueOffset()} and {@link Tag#getValueLength()} instead to save on
-   * allocations. n * @return tag value in a new byte array.
+   * allocations.
+   * @return tag value in a new byte array.
    * @deprecated As of HBase-2.0. Will be removed in HBase-3.0
    */
   @Deprecated
@@ -151,7 +152,8 @@ public final class CellUtil {
   /**
    * Makes a column in family:qualifier form from separate byte arrays.
    * <p>
+   * Not recommended for usage as this is an old-style API.
+   * Not recommended for usage as this is old-style API.
+   * @return family:qualifier
    */
   public static byte[] makeColumn(byte[] family, byte[] qualifier) {
     return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier);
@@ -367,7 +369,8 @@ public final class CellUtil {
   }
 
   /**
-   * Copies the tags info into the tag portion of the cell nnn * @return position after tags
+   * Copies the tags info into the tag portion of the cell
+   * @return position after tags
    * @deprecated As of HBase-2.0. Will be removed in HBase-3.0.
    */
   @Deprecated
@@ -385,7 +388,8 @@ public final class CellUtil {
   }
 
   /**
-   * Copies the tags info into the tag portion of the cell nnn * @return position after tags
+   * Copies the tags info into the tag portion of the cell
+   * @return position after tags
    * @deprecated As of HBase-2.0. Will be removed in 3.0.
    */
   @Deprecated
@@ -427,7 +431,7 @@ public final class CellUtil {
   }
 
   /**
-   * n * @return cell's qualifier wrapped into a ByteBuffer.
+   * @return cell's qualifier wrapped into a ByteBuffer.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
    */
   @Deprecated
@@ -509,8 +513,8 @@ public final class CellUtil {
   }
 
   /**
-   * Create a Cell with specific row. Other fields defaulted. n * @return Cell with passed row but
-   * all other fields are arbitrary
+   * Create a Cell with specific row. Other fields defaulted.
+   * @return Cell with passed row but all other fields are arbitrary
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link CellBuilder}
    *             instead
    */
@@ -520,8 +524,8 @@ public final class CellUtil {
   }
 
   /**
-   * Create a Cell with specific row and value. Other fields are defaulted. nn * @return Cell with
-   * passed row and value but all other fields are arbitrary
+   * Create a Cell with specific row and value. Other fields are defaulted.
+   * @return Cell with passed row and value but all other fields are arbitrary
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link CellBuilder}
    *             instead
    */
@@ -536,8 +540,8 @@ public final class CellUtil {
   }
 
   /**
-   * Create a Cell with specific row. Other fields defaulted. nnn * @return Cell with passed row but
-   * all other fields are arbitrary
+   * Create a Cell with specific row. Other fields defaulted.
+   * @return Cell with passed row but all other fields are arbitrary
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link CellBuilder}
    *             instead
    */
@@ -580,9 +584,7 @@ public final class CellUtil {
     return PrivateCellUtil.createCell(cell, value, tags);
   }
 
-  /**
-   * n * @return CellScanner interface over <code>cellIterables</code>
-   */
+  /** Returns CellScanner interface over <code>cellIterables</code> */
   public static CellScanner
     createCellScanner(final List<? extends CellScannable> cellScannerables) {
     return new CellScanner() {
@@ -608,17 +610,15 @@ public final class CellUtil {
     };
   }
 
-  /**
-   * n * @return CellScanner interface over <code>cellIterable</code>
-   */
+  /** Returns CellScanner interface over <code>cellIterable</code> */
   public static CellScanner createCellScanner(final Iterable<Cell> cellIterable) {
     if (cellIterable == null) return null;
     return createCellScanner(cellIterable.iterator());
   }
 
   /**
-   * n * @return CellScanner interface over <code>cellIterable</code> or null if <code>cells</code>
-   * is null
+   * Returns CellScanner interface over <code>cells</code> or null if <code>cells</code> is
+   * null
    */
   public static CellScanner createCellScanner(final Iterator<Cell> cells) {
     if (cells == null) return null;
@@ -640,9 +640,7 @@ public final class CellUtil {
     };
   }
 
-  /**
-   * n * @return CellScanner interface over <code>cellArray</code>
-   */
+  /** Returns CellScanner interface over <code>cellArray</code> */
   public static CellScanner createCellScanner(final Cell[] cellArray) {
     return new CellScanner() {
       private final Cell[] cells = cellArray;
@@ -698,7 +696,7 @@ public final class CellUtil {
   }
 
   /**
-   * nn * @return True if the rows in <code>left</code> and <code>right</code> Cells match
+   * @return True if the rows in <code>left</code> and <code>right</code> Cells match
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use
    *             {@link #matchingRows(Cell, Cell)}
    */
@@ -818,8 +816,8 @@ public final class CellUtil {
   }
 
   /**
-   * Finds if the qualifier part of the cell and the KV serialized byte[] are equal n * @param buf
-   * the serialized keyvalue format byte[]
+   * Finds if the qualifier part of the cell and the KV serialized byte[] are equal
+   * @param buf the serialized keyvalue format byte[]
    * @return true if the qualifier matches, false otherwise
    */
   public static boolean matchingQualifier(final Cell left, final byte[] buf) {
@@ -830,8 +828,8 @@ public final class CellUtil {
   }
 
   /**
-   * Finds if the qualifier part of the cell and the KV serialized byte[] are equal n * @param buf
-   * the serialized keyvalue format byte[]
+   * Finds if the qualifier part of the cell and the KV serialized byte[] are equal
+   * @param buf    the serialized keyvalue format byte[]
    * @param offset the offset of the qualifier in the byte[]
    * @param length the length of the qualifier in the byte[]
    * @return true if the qualifier matches, false otherwise
@@ -983,8 +981,9 @@ public final class CellUtil {
   /**
    * Estimate based on keyvalue's serialization format in the RPC layer. Note that there is an extra
    * SIZEOF_INT added to the size here that indicates the actual length of the cell for cases where
-   * cell's are serialized in a contiguous format (For eg in RPCs). n * @return Estimate of the
-   * <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the actual cell length.
+   * cells are serialized in a contiguous format (for example, in RPCs).
+   * @return Estimate of the <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the
+   *         actual cell length.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
    */
   @Deprecated
@@ -1002,16 +1001,15 @@ public final class CellUtil {
   }
 
   /**
-   * n * @return Sum of the lengths of all the elements in a Cell; does not count in any
-   * infrastructure
+   * Returns the sum of the lengths of all the elements in a Cell; does not count any infrastructure
    */
   private static int getSumOfCellElementLengths(final Cell cell) {
     return getSumOfCellKeyElementLengths(cell) + cell.getValueLength() + cell.getTagsLength();
   }
 
   /**
-   * n * @return Sum of all elements that make up a key; does not include infrastructure, tags or
-   * values.
+   * @return Sum of all elements that make up a key; does not include infrastructure, tags or
+   *         values.
    */
   private static int getSumOfCellKeyElementLengths(final Cell cell) {
     return cell.getRowLength() + cell.getFamilyLength() + cell.getQualifierLength()
@@ -1035,7 +1033,8 @@ public final class CellUtil {
    * This is an estimate of the heap space occupied by a cell. When the cell is of type
    * {@link HeapSize} we call {@link HeapSize#heapSize()} so cell can give a correct value. In other
    * cases we just consider the bytes occupied by the cell components ie. row, CF, qualifier,
-   * timestamp, type, value and tags. n * @return estimate of the heap space
+   * timestamp, type, value and tags.
+   * @return estimate of the heap space
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
    *             {@link RawCell#getTags()}
    */
@@ -1046,7 +1045,8 @@ public final class CellUtil {
 
   /********************* tags *************************************/
   /**
-   * Util method to iterate through the tags nnn * @return iterator for the tags
+   * Util method to iterate through the tags
+   * @return iterator for the tags
    * @deprecated As of 2.0.0 and will be removed in 3.0.0 Instead use
    *             {@link PrivateCellUtil#tagsIterator(Cell)}
    */
@@ -1125,8 +1125,8 @@ public final class CellUtil {
 
   /**
    * Sets the given seqId to the cell. Marked as audience Private as of 1.2.0. Setting a Cell
-   * sequenceid is an internal implementation detail not for general public use. nn * @throws
-   * IOException when the passed cell is not of type {@link ExtendedCell}
+   * sequenceid is an internal implementation detail not for general public use.
+   * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    * @deprecated As of HBase-2.0. Will be removed in HBase-3.0
    */
   @Deprecated
@@ -1135,8 +1135,8 @@ public final class CellUtil {
   }
 
   /**
-   * Sets the given timestamp to the cell. nn * @throws IOException when the passed cell is not of
-   * type {@link ExtendedCell}
+   * Sets the given timestamp to the cell.
+   * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    * @deprecated As of HBase-2.0. Will be a LimitedPrivate API in HBase-3.0.
    */
   @Deprecated
@@ -1145,7 +1145,8 @@ public final class CellUtil {
   }
 
   /**
-   * Sets the given timestamp to the cell. n * @param ts buffer containing the timestamp value
+   * Sets the given timestamp to the cell.
+   * @param ts       buffer containing the timestamp value
    * @param tsOffset offset to the new timestamp
    * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    * @deprecated As of HBase-2.0. Will be a LimitedPrivate API in HBase-3.0.
@@ -1157,7 +1158,8 @@ public final class CellUtil {
 
   /**
    * Sets the given timestamp to the cell iff current timestamp is
-   * {@link HConstants#LATEST_TIMESTAMP}. nn * @return True if cell timestamp is modified.
+   * {@link HConstants#LATEST_TIMESTAMP}.
+   * @return True if cell timestamp is modified.
    * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    * @deprecated As of HBase-2.0. Will be removed in HBase-3.0
    */
@@ -1168,7 +1170,8 @@ public final class CellUtil {
 
   /**
    * Sets the given timestamp to the cell iff current timestamp is
-   * {@link HConstants#LATEST_TIMESTAMP}. n * @param ts buffer containing the timestamp value
+   * {@link HConstants#LATEST_TIMESTAMP}.
+   * @param ts       buffer containing the timestamp value
    * @param tsOffset offset to the new timestamp
    * @return True if cell timestamp is modified.
    * @throws IOException when the passed cell is not of type {@link ExtendedCell}
@@ -1182,8 +1185,8 @@ public final class CellUtil {
   /**
    * Writes the Cell's key part as it would have serialized in a KeyValue. The format is &lt;2 bytes
    * rk len&gt;&lt;rk&gt;&lt;1 byte cf len&gt;&lt;cf&gt;&lt;qualifier&gt;&lt;8 bytes
-   * timestamp&gt;&lt;1 byte type&gt; nn * @deprecated As of HBase-2.0. Will be removed in HBase-3.0
-   * n
+   * timestamp&gt;&lt;1 byte type&gt;
+   * @deprecated As of HBase-2.0. Will be removed in HBase-3.0
    */
   @Deprecated
   public static void writeFlatKey(Cell cell, DataOutputStream out) throws IOException {
@@ -1219,7 +1222,8 @@ public final class CellUtil {
    * Writes the row from the given cell to the output stream excluding the common prefix
    * @param out     The dataoutputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param rlength the row length n * @deprecated As of 2.0. Will be removed in hbase-3.0
+   * @param rlength the row length
+   * @deprecated As of 2.0. Will be removed in hbase-3.0
    */
   @Deprecated
   public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength,
@@ -1233,9 +1237,7 @@ public final class CellUtil {
     }
   }
 
-  /**
-   * n * @return The Key portion of the passed <code>cell</code> as a String.
-   */
+  /** Returns The Key portion of the passed <code>cell</code> as a String. */
   public static String getCellKeyAsString(Cell cell) {
     return getCellKeyAsString(cell,
       c -> Bytes.toStringBinary(c.getRowArray(), c.getRowOffset(), c.getRowLength()));
@@ -1275,9 +1277,9 @@ public final class CellUtil {
   /**
    * This method exists just to encapsulate how we serialize keys. To be replaced by a factory that
    * we query to figure what the Cell implementation is and then, what serialization engine to use
-   * and further, how to serialize the key for inclusion in hfile index. TODO. n * @return The key
-   * portion of the Cell serialized in the old-school KeyValue way or null if passed a null
-   * <code>cell</code>
+   * and further, how to serialize the key for inclusion in hfile index. TODO.
+   * @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed
+   *         a null <code>cell</code>
    * @deprecated As of HBase-2.0. Will be removed in HBase-3.0
    */
   @Deprecated
@@ -1289,8 +1291,8 @@ public final class CellUtil {
   }
 
   /**
-   * Write rowkey excluding the common part. nnnnn * @deprecated As of HBase-2.0. Will be removed in
-   * HBase-3.0
+   * Write rowkey excluding the common part.
+   * @deprecated As of HBase-2.0. Will be removed in HBase-3.0
    */
   @Deprecated
   public static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonPrefix,
@@ -1494,7 +1496,8 @@ public final class CellUtil {
   }
 
   /**
-   * Compares the row of two keyvalues for equality nn * @return True if rows match.
+   * Compares the row of two keyvalues for equality
+   * @return True if rows match.
    */
   public static boolean matchingRows(final Cell left, final Cell right) {
     short lrowlength = left.getRowLength();
@@ -1526,8 +1529,8 @@ public final class CellUtil {
   }
 
   /**
-   * Compares the row and column of two keyvalues for equality nn * @return True if same row and
-   * column.
+   * Compares the row and column of two keyvalues for equality
+   * @return True if same row and column.
    */
   public static boolean matchingRowColumn(final Cell left, final Cell right) {
     short lrowlength = left.getRowLength();
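
Pulling a few of the CellUtil helpers above together in a sketch (cells built with the 2.x CellBuilder API):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellUtilSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("row1");
        Cell a = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
          .setRow(row).setFamily(Bytes.toBytes("f"))
          .setQualifier(Bytes.toBytes("q1")).setType(Cell.Type.Put).build();
        Cell b = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
          .setRow(row).setFamily(Bytes.toBytes("f"))
          .setQualifier(Bytes.toBytes("q2")).setType(Cell.Type.Put).build();
        System.out.println(CellUtil.matchingRows(a, b));      // true: same row
        System.out.println(CellUtil.matchingRowColumn(a, b)); // false: qualifiers differ
        // Old-style helper: joins family and qualifier as "f:q1".
        byte[] col = CellUtil.makeColumn(Bytes.toBytes("f"), Bytes.toBytes("q1"));
        System.out.println(Bytes.toString(col));
      }
    }
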
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
index ddbf71cac13..432556d2642 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
@@ -145,7 +145,7 @@ public class CompoundConfiguration extends Configuration {
   /**
    * Add Bytes map to config list. This map is generally created by HTableDescriptor or
    * HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous
-   * ones if there are name collisions. n * Bytes map
+   * ones if there are name collisions.
+   * @param map Bytes map
    * @return this, for builder pattern
    */
   public CompoundConfiguration addBytesMap(final Map<Bytes, Bytes> map) {
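
A sketch of addBytesMap with the later-wins collision rule; the key name is made up:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.CompoundConfiguration;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompoundConfSketch {
      public static void main(String[] args) {
        Map<Bytes, Bytes> map = new HashMap<>();
        map.put(new Bytes(Bytes.toBytes("hbase.sketch.key")),
          new Bytes(Bytes.toBytes("override")));
        // Sources added later shadow earlier ones on name collisions.
        CompoundConfiguration conf = new CompoundConfiguration().addBytesMap(map);
        System.out.println(conf.get("hbase.sketch.key")); // override
      }
    }
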
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
index b3b7a1c5e57..28e648ec466 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
@@ -41,7 +41,7 @@ public interface ExtendedCell extends RawCell, HeapSize {
    * &lt;tags&gt;</code>
    * @param out      Stream to which cell has to be written
    * @param withTags Whether to write tags.
-   * @return how many bytes are written. n
+   * @return how many bytes are written.
    */
   // TODO remove the boolean param once HBASE-16706 is done.
   default int write(OutputStream out, boolean withTags) throws IOException {
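
A sketch of the write contract above, using KeyValue (which implements ExtendedCell, as seen later in this diff):

    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.hbase.ExtendedCell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ExtendedCellSketch {
      public static void main(String[] args) throws Exception {
        ExtendedCell cell = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), 1L, KeyValue.Type.Put, Bytes.toBytes("v"));
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // Returns how many bytes were written; withTags controls whether the
        // tags section is appended in the KeyValue wire format.
        int written = cell.write(out, true);
        System.out.println(written == out.size()); // true
      }
    }
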
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
index e93cb5178df..91b875cae9f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
@@ -200,7 +200,7 @@ public class HBaseConfiguration extends Configuration {
    * @param conf    configuration instance for accessing the passwords
    * @param alias   the name of the password element
    * @param defPass the default password
-   * @return String password or default password n
+   * @return String password or default password
    */
   public static String getPassword(Configuration conf, String alias, String defPass)
     throws IOException {
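
A sketch of the password lookup; the alias is hypothetical, and without a configured Hadoop CredentialProvider entry the default is returned:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class PasswordSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Tries the credential provider first, then falls back to defPass.
        String pass = HBaseConfiguration.getPassword(conf, "my.password.alias", "default-secret");
        System.out.println(pass); // default-secret, absent a provider entry
      }
    }
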
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 9dfe73bb0b5..49532d6e46c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -247,8 +247,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     /**
-     * Cannot rely on enum ordinals . They change if item is removed or moved. Do our own codes. n
-     * * @return Type associated with passed code.
+     * Cannot rely on enum ordinals. They change if an item is removed or moved. Do our own codes.
+     * @return Type associated with passed code.
      */
     public static Type codeToType(final byte b) {
       Type t = codeArray[b & 0xff];
@@ -334,7 +334,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * <code>length</code>.
    * @param bytes  byte array
    * @param offset offset to start of the KeyValue
-   * @param length length of the KeyValue n
+   * @param length length of the KeyValue
    */
   public KeyValue(final byte[] bytes, final int offset, final int length, long ts) {
     this(bytes, offset, length, null, 0, 0, null, 0, 0, ts, Type.Maximum, null, 0, 0, null);
@@ -345,7 +345,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
   /**
    * Constructs KeyValue structure filled with null value. Sets type to
    * {@link KeyValue.Type#Maximum}
-   * @param row - row key (arbitrary byte array) n
+   * @param row - row key (arbitrary byte array)
    */
   public KeyValue(final byte[] row, final long timestamp) {
     this(row, null, null, timestamp, Type.Maximum, null);
@@ -353,7 +353,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
 
   /**
    * Constructs KeyValue structure filled with null value.
-   * @param row - row key (arbitrary byte array) n
+   * @param row - row key (arbitrary byte array)
    */
   public KeyValue(final byte[] row, final long timestamp, Type type) {
     this(row, null, null, timestamp, type, null);
@@ -387,7 +387,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param family    family name
    * @param qualifier column qualifier
    * @param timestamp version timestamp
-   * @param type      key type n
+   * @param type      key type
    */
   public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier,
     final long timestamp, Type type) {
@@ -400,7 +400,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param family    family name
    * @param qualifier column qualifier
    * @param timestamp version timestamp
-   * @param value     column value n
+   * @param value     column value
    */
   public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier,
     final long timestamp, final byte[] value) {
@@ -414,7 +414,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param qualifier column qualifier
    * @param timestamp version timestamp
    * @param value     column value
-   * @param tags      tags n
+   * @param tags      tags
    */
   public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier,
     final long timestamp, final byte[] value, final Tag[] tags) {
@@ -428,7 +428,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param qualifier column qualifier
    * @param timestamp version timestamp
    * @param value     column value
-   * @param tags      tags non-empty list of tags or null n
+   * @param tags      tags non-empty list of tags or null
    */
   public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier,
     final long timestamp, final byte[] value, final List<Tag> tags) {
@@ -444,7 +444,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param qualifier column qualifier
    * @param timestamp version timestamp
    * @param type      key type
-   * @param value     column value n
+   * @param value     column value
    */
   public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier,
     final long timestamp, Type type, final byte[] value) {
@@ -461,7 +461,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param qualifier column qualifier
    * @param timestamp version timestamp
    * @param type      key type
-   * @param value     column value n
+   * @param value     column value
    */
   public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier,
     final long timestamp, Type type, final byte[] value, final List<Tag> tags) {
@@ -476,7 +476,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param qualifier column qualifier
    * @param timestamp version timestamp
    * @param type      key type
-   * @param value     column value n
+   * @param value     column value
    */
   public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier,
     final long timestamp, Type type, final byte[] value, final byte[] tags) {
@@ -495,7 +495,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param type      key type
    * @param value     column value
    * @param voffset   value offset
-   * @param vlength   value length n
+   * @param vlength   value length
    */
   public KeyValue(byte[] row, byte[] family, byte[] qualifier, int qoffset, int qlength,
     long timestamp, Type type, byte[] value, int voffset, int vlength, List<Tag> tags) {
@@ -504,8 +504,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
   }
 
   /**
-   * nnnnnnnnnnn
-   */
+   *   */
   public KeyValue(byte[] row, byte[] family, byte[] qualifier, int qoffset, int qlength,
     long timestamp, Type type, byte[] value, int voffset, int vlength, byte[] tags) {
     this(row, 0, row == null ? 0 : row.length, family, 0, family == null ? 0 : family.length,
@@ -517,7 +516,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * Constructs KeyValue structure filled with specified values.
    * <p>
    * Column is split into two fields, family and qualifier.
-   * @param row row key n
+   * @param row row key
    */
   public KeyValue(final byte[] row, final int roffset, final int rlength, final byte[] family,
     final int foffset, final int flength, final byte[] qualifier, final int qoffset,
@@ -580,7 +579,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param value     column value
    * @param voffset   value offset
    * @param vlength   value length
-   * @param tags      tags n
+   * @param tags      tags
    */
   public KeyValue(final byte[] row, final int roffset, final int rlength, final byte[] family,
     final int foffset, final int flength, final byte[] qualifier, final int qoffset,
@@ -593,8 +592,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
   }
 
   /**
-   * nnnnnnnnnnnnnnn
-   */
+   *   */
   public KeyValue(final byte[] row, final int roffset, final int rlength, final byte[] family,
     final int foffset, final int flength, final byte[] qualifier, final int qoffset,
     final int qlength, final long timestamp, final Type type, final byte[] value, final int voffset,
@@ -615,7 +613,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param qlength   qualifier length
    * @param timestamp version timestamp
    * @param type      key type
-   * @param vlength   value length n
+   * @param vlength   value length
    */
   public KeyValue(final int rlength, final int flength, final int qlength, final long timestamp,
     final Type type, final int vlength) {
@@ -632,7 +630,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param qlength   qualifier length
    * @param timestamp version timestamp
    * @param type      key type
-   * @param vlength   value length nn
+   * @param vlength   value length
    */
   public KeyValue(final int rlength, final int flength, final int qlength, final long timestamp,
     final Type type, final int vlength, final int tagsLength) {
@@ -662,7 +660,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
 
   /**
    * Create an empty byte[] representing a KeyValue All lengths are preset and can be filled in
-   * later. nnnnnn * @return The newly created byte array.
+   * later.
+   * @return The newly created byte array.
    */
   private static byte[] createEmptyByteArray(final int rlength, int flength, int qlength,
     final long timestamp, final Type type, int vlength, int tagsLength) {
@@ -987,7 +986,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
 
   /**
    * Clones a KeyValue. This creates a copy, re-allocating the buffer.
-   * @return Fully copied clone of this KeyValue n
+   * @return Fully copied clone of this KeyValue
    */
   @Override
   public KeyValue clone() throws CloneNotSupportedException {
@@ -1338,15 +1337,14 @@ public class KeyValue implements ExtendedCell, Cloneable {
   }
 
   /**
-   * n
-   */
+   *   */
   @Override
   public long getTimestamp() {
     return getTimestamp(getKeyLength());
   }
 
   /**
-   * @param keylength Pass if you have it to save on a int creation. n
+   * @param keylength Pass if you have it to save on an int creation.
    */
   long getTimestamp(final int keylength) {
     int tsOffset = getTimestampOffset(keylength);
@@ -1416,9 +1414,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
     return new KeyValue(newBuffer);
   }
 
-  /**
-   * nn * @return Index of delimiter having started from start of <code>b</code> moving rightward.
-   */
+  /** Returns Index of delimiter having started from start of <code>b</code> moving rightward. */
   public static int getDelimiter(final byte[] b, int offset, final int length,
     final int delimiter) {
     if (b == null) {
@@ -1435,8 +1431,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
   }
 
   /**
-   * Find index of passed delimiter walking from end of buffer backwards. nn * @return Index of
-   * delimiter
+   * Find index of passed delimiter walking from end of buffer backwards.
+   * @return Index of delimiter
    */
   public static int getDelimiterInReverse(final byte[] b, final int offset, final int length,
     final int delimiter) {
@@ -1587,7 +1583,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
 
     /**
     * Compares only the user specified portion of a Key. This is overridden by MetaComparator.
-     * nn * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      */
     protected int compareRowKey(final Cell left, final Cell right) {
       return CellComparatorImpl.COMPARATOR.compareRows(left, right);
@@ -1595,8 +1591,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
 
     /**
      * Compares left to right assuming that left,loffset,llength and right,roffset,rlength are full
-     * KVs laid out in a flat byte[]s. nnnnnn * @return 0 if equal, &lt;0 if left smaller, &gt;0 if
-     * right smaller
+     * KVs laid out in a flat byte[]s.
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      */
     public int compareFlatKey(byte[] left, int loffset, int llength, byte[] right, int roffset,
       int rlength) {
@@ -1685,17 +1681,15 @@ public class KeyValue implements ExtendedCell, Cloneable {
       return CellComparatorImpl.COMPARATOR.compareTimestamps(left, right);
     }
 
-    /**
-     * nn * @return Result comparing rows.
-     */
+    /** Returns Result comparing rows. */
     public int compareRows(final Cell left, final Cell right) {
       return compareRows(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
         right.getRowArray(), right.getRowOffset(), right.getRowLength());
     }
 
     /**
-     * Get the b[],o,l for left and right rowkey portions and compare. nnnnnn * @return 0 if equal,
-     * &lt;0 if left smaller, &gt;0 if right smaller
+     * Get the b[],o,l for left and right rowkey portions and compare.
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      */
     public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset,
       int rlength) {
@@ -1733,7 +1727,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     /**
-     * Overridden nnnnnnn * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
+     * Overridden
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      */
     @Override // SamePrefixComparator
     public int compareIgnoringPrefix(int commonPrefix, byte[] left, int loffset, int llength,
@@ -1771,8 +1766,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
      * Compare columnFamily, qualifier, timestamp, and key type (everything except the row). This
      * method is used both in the normal comparator and the "same-prefix" comparator. Note that we
      * are assuming that row portions of both KVs have already been parsed and found identical, and
-     * we don't validate that assumption here. n * the length of the common prefix of the two
-     * key-values being compared, including row length and row
+     * we don't validate that assumption here.
+     * @param commonPrefix the length of the common prefix of the two key-values being compared,
+     *                     including row length and row
      */
     private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength,
       byte[] right, int roffset, int rlength, short rowlength) {
@@ -1866,8 +1861,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     /**
-     * Compares the row and column of two keyvalues for equality nn * @return True if same row and
-     * column.
+     * Compares the row and column of two keyvalues for equality
+     * @return True if same row and column.
      */
     public boolean matchingRowColumn(final Cell left, final Cell right) {
       short lrowlength = left.getRowLength();
@@ -1903,7 +1898,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     /**
-     * Compares the row of two keyvalues for equality nn * @return True if rows match.
+     * Compares the row of two keyvalues for equality
+     * @return True if rows match.
      */
     public boolean matchingRows(final Cell left, final Cell right) {
       short lrowlength = left.getRowLength();
@@ -1911,9 +1907,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
       return matchingRows(left, lrowlength, right, rrowlength);
     }
 
-    /**
-     * nnnn * @return True if rows match.
-     */
+    /** Returns True if rows match. */
     private boolean matchingRows(final Cell left, final short lrowlength, final Cell right,
       final short rrowlength) {
       return lrowlength == rrowlength && matchingRows(left.getRowArray(), left.getRowOffset(),
@@ -1952,8 +1946,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     /**
-     * This is a HFile block index key optimization. nn * @return 0 if equal, &lt;0 if left smaller,
-     * &gt;0 if right smaller
+     * This is a HFile block index key optimization.
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      * @deprecated Since 0.99.2;
      */
     @Deprecated
@@ -2037,16 +2031,16 @@ public class KeyValue implements ExtendedCell, Cloneable {
    * @param in Where to read bytes from. Creates a byte array to hold the KeyValue backing bytes
    *           copied from the stream.
    * @return KeyValue created by deserializing from <code>in</code> OR if we find a length of zero,
-   *         we will return null which can be useful marking a stream as done. n
+   *         we will return null which can be useful marking a stream as done.
    */
   public static KeyValue create(final DataInput in) throws IOException {
     return create(in.readInt(), in);
   }
 
   /**
-   * Create a KeyValue reading <code>length</code> from <code>in</code> nn * @return Created
-   * KeyValue OR if we find a length of zero, we will return null which can be useful marking a
-   * stream as done. n
+   * Create a KeyValue reading <code>length</code> from <code>in</code>
+   * @return Created KeyValue OR if we find a length of zero, we will return null which can be
+   *         useful marking a stream as done.
    */
   public static KeyValue create(int length, final DataInput in) throws IOException {
 
@@ -2062,8 +2056,9 @@ public class KeyValue implements ExtendedCell, Cloneable {
   }
 
   /**
-   * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. nn
-   * * @return Length written on stream n * @see #create(DataInput) for the inverse function
+   * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
+   * @return Length written on stream
+   * @see #create(DataInput) for the inverse function
    */
   public static long write(final KeyValue kv, final DataOutput out) throws IOException {
     // This is how the old Writables write used to serialize KVs. Need to figure way to make it
@@ -2077,8 +2072,9 @@ public class KeyValue implements ExtendedCell, Cloneable {
   /**
    * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable but do not
    * require a {@link DataOutput}, just take plain {@link OutputStream} Named <code>oswrite</code>
-   * so does not clash with {@link #write(KeyValue, DataOutput)} nnn * @return Length written on
-   * stream n * @see #create(DataInput) for the inverse function
+   * so does not clash with {@link #write(KeyValue, DataOutput)}
+   * @return Length written on stream
+   * @see #create(DataInput) for the inverse function
    * @see #write(KeyValue, DataOutput)
    * @see KeyValueUtil#oswrite(Cell, OutputStream, boolean)
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use
@@ -2194,7 +2190,7 @@ public class KeyValue implements ExtendedCell, Cloneable {
 
     /**
      * A setter that helps to avoid object creation every time and whenever there is a need to
-     * create new KeyOnlyKeyValue. nnn
+     * create new KeyOnlyKeyValue.
      */
     public void setKey(byte[] key, int offset, int length) {
       this.bytes = key;
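
A sketch exercising a few of the KeyValue APIs touched above; the constructor and getDelimiter signatures are as shown in this diff:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeyValueSketch {
      public static void main(String[] args) throws Exception {
        KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), 1L, KeyValue.Type.Put, Bytes.toBytes("v"));
        // clone() re-allocates the backing buffer, so the copy is independent.
        KeyValue copy = kv.clone();
        System.out.println(copy.getTimestamp()); // 1
        // Rightward scan for the first ':' in "f:q".
        byte[] col = Bytes.toBytes("f:q");
        System.out.println(KeyValue.getDelimiter(col, 0, col.length, ':')); // 1
      }
    }
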
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
index 4291d904fe8..ed3687e9ed4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
@@ -55,8 +55,8 @@ public class KeyValueTestUtil {
 
   /**
    * Checks whether KeyValues from kvCollection2 are contained in kvCollection1. The comparison is
-   * made without distinguishing MVCC version of the KeyValues nn * @return true if KeyValues from
-   * kvCollection2 are contained in kvCollection1
+   * made without distinguishing MVCC version of the KeyValues
+   * @return true if KeyValues from kvCollection2 are contained in kvCollection1
    */
   public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1,
     Collection<? extends Cell> kvCollection2) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index 87b2adf02aa..368e3d69a04 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -59,8 +59,8 @@ public class KeyValueUtil {
 
   /**
    * Returns number of bytes this cell's key part would have been used if serialized as in
-   * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type. n * @return the
-   * key length
+   * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type.
+   * @return the key length
    */
   public static int keyLength(final Cell cell) {
     return keyLength(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength());
@@ -97,8 +97,8 @@ public class KeyValueUtil {
   }
 
   /**
-   * The position will be set to the beginning of the new ByteBuffer n * @return the Bytebuffer
-   * containing the key part of the cell
+   * The position will be set to the beginning of the new ByteBuffer
+   * @return the Bytebuffer containing the key part of the cell
    */
   public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
     byte[] bytes = new byte[keyLength(cell)];
@@ -108,8 +108,8 @@ public class KeyValueUtil {
   }
 
   /**
-   * Copies the key to a new KeyValue n * @return the KeyValue that consists only the key part of
-   * the incoming cell
+   * Copies the key to a new KeyValue
+   * @return the KeyValue that consists only the key part of the incoming cell
    */
   public static KeyValue toNewKeyCell(final Cell cell) {
     byte[] bytes = new byte[keyLength(cell)];
@@ -204,7 +204,7 @@ public class KeyValueUtil {
 
   /**
    * Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
-   * position to the start of the next KeyValue. Does not allocate a new array or copy data. nnn
+   * position to the start of the next KeyValue. Does not allocate a new array or copy data.
    */
   public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
     boolean includesTags) {
@@ -238,7 +238,8 @@ public class KeyValueUtil {
 
   /**
    * Decrement the timestamp. For tests (currently wasteful) Remember timestamps are sorted reverse
-   * chronologically. n * @return previous key
+   * chronologically.
+   * @return previous key
    */
   public static KeyValue previousKey(final KeyValue in) {
     return createFirstOnRow(CellUtil.cloneRow(in), CellUtil.cloneFamily(in),
@@ -248,9 +249,8 @@ public class KeyValueUtil {
   /**
    * Create a KeyValue for the specified row, family and qualifier that would be larger than or
    * equal to all other possible KeyValues that have the same row, family, qualifier. Used for
-   * reseeking. Should NEVER be returned to a client. n * row key n * row offset n * row length n *
-   * family name n * family offset n * family length n * column qualifier n * qualifier offset n *
-   * qualifier length
+   * reseeking. Should NEVER be returned to a client. Takes the row key (with offset and length),
+   * the family name (with offset and length), and the column qualifier (with offset and length).
    * @return Last possible key on passed row, family, qualifier.
    */
   public static KeyValue createLastOnRow(final byte[] row, final int roffset, final int rlength,
@@ -408,11 +408,11 @@ public class KeyValueUtil {
 
   /*************** misc **********************************/
   /**
-   * n * @return <code>cell</code> if it is an object of class {@link KeyValue} else we will return
-   * a new {@link KeyValue} instance made from <code>cell</code> Note: Even if the cell is an object
-   * of any of the subclass of {@link KeyValue}, we will create a new {@link KeyValue} object
-   * wrapping same buffer. This API is used only with MR based tools which expect the type to be
-   * exactly KeyValue. That is the reason for doing this way.
+   * @return <code>cell</code> if it is an object of class {@link KeyValue} else we will return a
+   *         new {@link KeyValue} instance made from <code>cell</code>. Note: Even if the cell is an
+   *         object of any subclass of {@link KeyValue}, we will create a new {@link KeyValue}
+   *         object wrapping the same buffer. This API is used only with MR based tools which expect
+   *         the type to be exactly KeyValue. That is the reason for doing it this way.
    * @deprecated without any replacement.
    */
   @Deprecated
@@ -444,8 +444,9 @@ public class KeyValueUtil {
   }
 
   /**
-   * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. nn
-   * * @return Length written on stream n * @see #create(DataInput) for the inverse function
+   * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
+   * @return Length written on stream
+   * @see #create(DataInput) for the inverse function
    */
   public static long write(final KeyValue kv, final DataOutput out) throws IOException {
     // This is how the old Writables write used to serialize KVs. Need to figure
@@ -639,7 +640,7 @@ public class KeyValueUtil {
    * @param in       inputStream to read.
    * @param withTags whether the keyvalue should include tags are not
    * @return Created KeyValue OR if we find a length of zero, we will return null which can be
-   *         useful marking a stream as done. n
+   *         useful marking a stream as done.
    */
   public static KeyValue createKeyValueFromInputStream(InputStream in, boolean withTags)
     throws IOException {
@@ -663,24 +664,24 @@ public class KeyValueUtil {
   }
 
   /**
-   * n * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
-   * hfile index members to KeyValues.
+   * Returns a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+   * index members to KeyValues.
    */
   public static KeyValue createKeyValueFromKey(final byte[] b) {
     return createKeyValueFromKey(b, 0, b.length);
   }
 
   /**
-   * n * @return A KeyValue made of a byte buffer that holds the key-only part. Needed to convert
-   * hfile index members to KeyValues.
+   * Return a KeyValue made of a byte buffer that holds the key-only part. Needed to convert hfile
+   * index members to KeyValues.
    */
   public static KeyValue createKeyValueFromKey(final ByteBuffer bb) {
     return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit());
   }
 
   /**
-   * nnn * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
-   * hfile index members to KeyValues.
+   * Return a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+   * index members to KeyValues.
    */
   public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
     byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
@@ -691,19 +692,19 @@ public class KeyValueUtil {
   }
 
   /**
-   * n * Where to read bytes from. Creates a byte array to hold the KeyValue backing bytes copied
-   * from the steam.
+   * Reads a KeyValue from the given input. Creates a byte array to hold the KeyValue backing bytes
+   * copied from the stream.
    * @return KeyValue created by deserializing from <code>in</code> OR if we find a length of zero,
-   *         we will return null which can be useful marking a stream as done. n
+   *         we will return null which can be useful marking a stream as done.
    */
   public static KeyValue create(final DataInput in) throws IOException {
     return create(in.readInt(), in);
   }
 
   /**
-   * Create a KeyValue reading <code>length</code> from <code>in</code> nn * @return Created
-   * KeyValue OR if we find a length of zero, we will return null which can be useful marking a
-   * stream as done. n
+   * Create a KeyValue reading <code>length</code> from <code>in</code>
+   * @return Created KeyValue OR if we find a length of zero, we will return null which can be
+   *         useful marking a stream as done.
    */
   public static KeyValue create(int length, final DataInput in) throws IOException {
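
Taken together, write() and create() above form the round trip the @see tags point at. A minimal sketch, assuming an in-memory stream (the sample KeyValue and class name are illustrative only):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeyValueRoundTrip {
      public static void main(String[] args) throws IOException {
        KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes("value"));
        // Serialize in the old Writable layout; returns the number of bytes written.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        long written = KeyValueUtil.write(kv, new DataOutputStream(bos));
        // create(DataInput) is the documented inverse; it returns null on a zero
        // length, which callers use as an end-of-stream marker.
        KeyValue copy =
          KeyValueUtil.create(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
      }
    }
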
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
index b3e70132dfc..9a6624d43f5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
@@ -869,7 +869,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Copies the tags info into the tag portion of the cell nnn * @return position after tags
+   * Copies the tags info into the tag portion of the cell
+   * @return position after tags
    */
   public static int copyTagsTo(Cell cell, byte[] destination, int destinationOffset) {
     int tlen = cell.getTagsLength();
@@ -885,7 +886,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Copies the tags info into the tag portion of the cell nnn * @return the position after tags
+   * Copies the tags info into the tag portion of the cell
+   * @return the position after tags
    */
   public static int copyTagsTo(Cell cell, ByteBuffer destination, int destinationOffset) {
     int tlen = cell.getTagsLength();
@@ -1002,7 +1004,7 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Write rowkey excluding the common part. nnnnn
+   * Write rowkey excluding the common part.
    */
   public static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonPrefix,
     DataOutputStream out) throws IOException {
@@ -1023,7 +1025,7 @@ public final class PrivateCellUtil {
    * Writes the row from the given cell to the output stream excluding the common prefix
    * @param out     The dataoutputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param rlength the row length n
+   * @param rlength the row length
    */
   public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength,
     int commonPrefix) throws IOException {
@@ -1211,8 +1213,8 @@ public final class PrivateCellUtil {
 
   /**
    * Compares only the key portion of a cell. It does not include the sequence id/mvcc of the cell
-   * nn * @return an int greater than 0 if left &gt; than right lesser than 0 if left &lt; than
-   * right equal to 0 if left is equal to right
+   * @return an int greater than 0 if left &gt; right, less than 0 if left &lt; right, and 0 if
+   *         left is equal to right
    */
   public static final int compareKeyIgnoresMvcc(CellComparator comparator, Cell left, Cell right) {
     return ((CellComparatorImpl) comparator).compare(left, right, true);
@@ -2171,7 +2173,7 @@ public final class PrivateCellUtil {
   /**
    * Writes the Cell's key part as it would have serialized in a KeyValue. The format is &lt;2 bytes
    * rk len&gt;&lt;rk&gt;&lt;1 byte cf len&gt;&lt;cf&gt;&lt;qualifier&gt;&lt;8 bytes
-   * timestamp&gt;&lt;1 byte type&gt; nnn
+   * timestamp&gt;&lt;1 byte type&gt;
    */
   public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
     short rowLen = cell.getRowLength();
@@ -2203,7 +2205,7 @@ public final class PrivateCellUtil {
   /**
    * Deep clones the given cell if the cell supports deep cloning
    * @param cell the cell to be cloned
-   * @return the cloned cell n
+   * @return the cloned cell
    */
   public static Cell deepClone(Cell cell) throws CloneNotSupportedException {
     if (cell instanceof ExtendedCell) {
@@ -2217,7 +2219,7 @@ public final class PrivateCellUtil {
    * @param cell     the cell to be written
    * @param out      the outputstream
    * @param withTags if tags are to be written or not
-   * @return the total bytes written n
+   * @return the total bytes written
    */
   public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException {
     if (cell instanceof ExtendedCell) {
@@ -2292,8 +2294,8 @@ public final class PrivateCellUtil {
 
   /**
    * Sets the given seqId to the cell. Marked as audience Private as of 1.2.0. Setting a Cell
-   * sequenceid is an internal implementation detail not for general public use. nn * @throws
-   * IOException when the passed cell is not of type {@link ExtendedCell}
+   * sequenceid is an internal implementation detail not for general public use.
+   * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    */
   public static void setSequenceId(Cell cell, long seqId) throws IOException {
     if (cell instanceof ExtendedCell) {
@@ -2305,8 +2307,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Sets the given timestamp to the cell. nn * @throws IOException when the passed cell is not of
-   * type {@link ExtendedCell}
+   * Sets the given timestamp to the cell.
+   * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    */
   public static void setTimestamp(Cell cell, long ts) throws IOException {
     if (cell instanceof ExtendedCell) {
@@ -2318,7 +2320,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Sets the given timestamp to the cell. n * @param ts buffer containing the timestamp value
+   * Sets the given timestamp to the cell.
+   * @param ts buffer containing the timestamp value
    * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    */
   public static void setTimestamp(Cell cell, byte[] ts) throws IOException {
@@ -2332,7 +2335,8 @@ public final class PrivateCellUtil {
 
   /**
    * Sets the given timestamp to the cell iff current timestamp is
-   * {@link HConstants#LATEST_TIMESTAMP}. nn * @return True if cell timestamp is modified.
+   * {@link HConstants#LATEST_TIMESTAMP}.
+   * @return True if cell timestamp is modified.
    * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    */
   public static boolean updateLatestStamp(Cell cell, long ts) throws IOException {
@@ -2345,7 +2349,8 @@ public final class PrivateCellUtil {
 
   /**
    * Sets the given timestamp to the cell iff current timestamp is
-   * {@link HConstants#LATEST_TIMESTAMP}. n * @param ts buffer containing the timestamp value
+   * {@link HConstants#LATEST_TIMESTAMP}.
+   * @param ts buffer containing the timestamp value
    * @return True if cell timestamp is modified.
    * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    */
@@ -2361,7 +2366,7 @@ public final class PrivateCellUtil {
    * Writes the row from the given cell to the output stream
    * @param out     The outputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param rlength the row length n
+   * @param rlength the row length
    */
   public static void writeRow(OutputStream out, Cell cell, short rlength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2376,7 +2381,7 @@ public final class PrivateCellUtil {
    * Writes the family from the given cell to the output stream
    * @param out     The outputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param flength the family length n
+   * @param flength the family length
    */
   public static void writeFamily(OutputStream out, Cell cell, byte flength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2391,7 +2396,7 @@ public final class PrivateCellUtil {
    * Writes the qualifier from the given cell to the output stream
    * @param out     The outputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param qlength the qualifier length n
+   * @param qlength the qualifier length
    */
   public static void writeQualifier(OutputStream out, Cell cell, int qlength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2407,7 +2412,7 @@ public final class PrivateCellUtil {
    * Writes the qualifier from the given cell to the output stream excluding the common prefix
    * @param out     The dataoutputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param qlength the qualifier length n
+   * @param qlength the qualifier length
    */
   public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, int qlength,
     int commonPrefix) throws IOException {
@@ -2426,7 +2431,7 @@ public final class PrivateCellUtil {
    * Writes the value from the given cell to the output stream
    * @param out     The outputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param vlength the value length n
+   * @param vlength the value length
    */
   public static void writeValue(OutputStream out, Cell cell, int vlength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2441,7 +2446,7 @@ public final class PrivateCellUtil {
    * Writes the tag from the given cell to the output stream
    * @param out        The outputstream to which the data has to be written
    * @param cell       The cell whose contents has to be written
-   * @param tagsLength the tag length n
+   * @param tagsLength the tag length
    */
   public static void writeTags(OutputStream out, Cell cell, int tagsLength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2474,7 +2479,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the rowkey bytes of the given cell into an int value n * @return rowkey as int
+   * Converts the rowkey bytes of the given cell into an int value
+   * @return rowkey as int
    */
   public static int getRowAsInt(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2485,7 +2491,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the value bytes of the given cell into a long value n * @return value as long
+   * Converts the value bytes of the given cell into a long value
+   * @return value as long
    */
   public static long getValueAsLong(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2496,7 +2503,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the value bytes of the given cell into a int value n * @return value as int
+   * Converts the value bytes of the given cell into an int value
+   * @return value as int
    */
   public static int getValueAsInt(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2507,7 +2515,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the value bytes of the given cell into a double value n * @return value as double
+   * Converts the value bytes of the given cell into a double value
+   * @return value as double
    */
   public static double getValueAsDouble(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2518,7 +2527,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the value bytes of the given cell into a BigDecimal n * @return value as BigDecimal
+   * Converts the value bytes of the given cell into a BigDecimal
+   * @return value as BigDecimal
    */
   public static BigDecimal getValueAsBigDecimal(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2739,8 +2749,9 @@ public final class PrivateCellUtil {
   /**
    * Estimate based on keyvalue's serialization format in the RPC layer. Note that there is an extra
    * SIZEOF_INT added to the size here that indicates the actual length of the cell for cases where
-   * cell's are serialized in a contiguous format (For eg in RPCs). n * @return Estimate of the
-   * <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the actual cell length.
+   * cells are serialized in a contiguous format (for example, in RPCs).
+   * @return Estimate of the <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the
+   *         actual cell length.
    */
   public static int estimatedSerializedSizeOf(final Cell cell) {
     return cell.getSerializedSize() + Bytes.SIZEOF_INT;
@@ -2760,9 +2771,9 @@ public final class PrivateCellUtil {
   /**
    * This method exists just to encapsulate how we serialize keys. To be replaced by a factory that
    * we query to figure what the Cell implementation is and then, what serialization engine to use
-   * and further, how to serialize the key for inclusion in hfile index. TODO. n * @return The key
-   * portion of the Cell serialized in the old-school KeyValue way or null if passed a null
-   * <code>cell</code>
+   * and further, how to serialize the key for inclusion in hfile index. TODO.
+   * @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed
+   *         a null <code>cell</code>
    */
   public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) {
     if (cell == null) return null;
@@ -2772,8 +2783,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Create a Cell that is smaller than all other possible Cells for the given Cell's row. n
-   * * @return First possible Cell on passed Cell's row.
+   * Create a Cell that is smaller than all other possible Cells for the given Cell's row.
+   * @return First possible Cell on passed Cell's row.
    */
   public static Cell createFirstOnRow(final Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2837,8 +2848,8 @@ public final class PrivateCellUtil {
 
   /**
    * Create a Cell that is smaller than all other possible Cells for the given Cell's rk:cf and
-   * passed qualifier. nnnn * @return Last possible Cell on passed Cell's rk:cf and passed
-   * qualifier.
+   * passed qualifier.
+   * @return Last possible Cell on passed Cell's rk:cf and passed qualifier.
    */
   public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2858,7 +2869,7 @@ public final class PrivateCellUtil {
    * Creates the first cell with the row/family/qualifier of this cell and the given timestamp. Uses
    * the "maximum" type that guarantees that the new cell is the lowest possible for this
    * combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored.
-   * @param cell - cell n
+   * @param cell - cell
    */
   public static Cell createFirstOnRowColTS(Cell cell, long ts) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2876,8 +2887,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Create a Cell that is larger than all other possible Cells for the given Cell's row. n
-   * * @return Last possible Cell on passed Cell's row.
+   * Create a Cell that is larger than all other possible Cells for the given Cell's row.
+   * @return Last possible Cell on passed Cell's row.
    */
   public static Cell createLastOnRow(final Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2894,7 +2905,8 @@ public final class PrivateCellUtil {
   /**
    * Create a Cell that is larger than all other possible Cells for the given Cell's rk:cf:q. Used
    * in creating "fake keys" for the multi-column Bloom filter optimization to skip the row/column
-   * we already know is not in the file. n * @return Last possible Cell on passed Cell's rk:cf:q.
+   * we already know is not in the file.
+   * @return Last possible Cell on passed Cell's rk:cf:q.
    */
   public static Cell createLastOnRowCol(final Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
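
Stepping back from the hunks, the setTimestamp/updateLatestStamp contract above is easy to miss. A sketch, noting that PrivateCellUtil is audience-private and that KeyValue implements ExtendedCell (a plain Cell implementation would make the setters throw IOException):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StampSketch {
      public static void main(String[] args) throws IOException {
        KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"),
          HConstants.LATEST_TIMESTAMP, Bytes.toBytes("v"));
        // Rewrites the stamp only because the cell still carries LATEST_TIMESTAMP.
        boolean changed = PrivateCellUtil.updateLatestStamp(kv, System.currentTimeMillis());
        // A second call is a no-op: the timestamp is concrete now.
        boolean changedAgain = PrivateCellUtil.updateLatestStamp(kv, 42L);
      }
    }
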
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
index fd0aa43190b..588bd1bc80b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
@@ -62,7 +62,7 @@ public interface Tag {
   boolean hasArray();
 
   /**
-   * @return The array containing the value bytes. n * when {@link #hasArray()} return false. Use
+   * @return The array containing the value bytes. When {@link #hasArray()} returns false, use
    *         {@link #getValueByteBuffer()} in such situation
    */
   byte[] getValueArray();
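
In practice the hasArray() caveat above looks like this sketch (getValueOffset() and getValueLength() are the companion accessors on Tag; the copy loop and class name are illustrative):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.Tag;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TagValueSketch {
      // Copies a tag's value while honoring the hasArray() contract.
      static byte[] cloneValue(Tag tag) {
        if (tag.hasArray()) {
          return Bytes.copy(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
        }
        ByteBuffer bb = tag.getValueByteBuffer();
        byte[] out = new byte[tag.getValueLength()];
        for (int i = 0; i < out.length; i++) {
          out[i] = bb.get(tag.getValueOffset() + i); // absolute get: buffer position untouched
        }
        return out;
      }
    }
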
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
index be8e4e769ba..9a2a29356b1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
@@ -95,7 +95,7 @@ public abstract class BaseDecoder implements Codec.Decoder {
   /**
    * Extract a Cell.
    * @return a parsed Cell or throws an Exception. EOFException or a generic IOException maybe
-   *         thrown if EOF is reached prematurely. Does not return null. n
+   *         thrown if EOF is reached prematurely. Does not return null.
    */
   @NonNull
   protected abstract Cell parseCell() throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
index e7facdbfbf2..f4552c03826 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
@@ -62,7 +62,7 @@ public class CellCodec implements Codec {
     }
 
     /**
-     * Write int length followed by array bytes. nnnn
+     * Write int length followed by array bytes.
      */
     private void write(final byte[] bytes, final int offset, final int length) throws IOException {
       // TODO add BB backed os check and do for write. Pass Cell
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
index 75e3d48d9fa..07bfb53d5df 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
@@ -65,7 +65,7 @@ public class CellCodecWithTags implements Codec {
     }
 
     /**
-     * Write int length followed by array bytes. nnnn
+     * Write int length followed by array bytes.
      */
     private void write(final byte[] bytes, final int offset, final int length) throws IOException {
       this.out.write(Bytes.toBytes(length));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java
index 480680b00f3..c7b82024bbe 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java
@@ -51,8 +51,8 @@ public abstract class ByteArrayComparable implements Comparable<byte[]> {
 
   /**
    * @param pbBytes A pb serialized {@link ByteArrayComparable} instance
-   * @return An instance of {@link ByteArrayComparable} made from <code>bytes</code> n * @see
-   *         #toByteArray
+   * @return An instance of {@link ByteArrayComparable} made from <code>bytes</code>
+   * @see #toByteArray
    */
   public static ByteArrayComparable parseFrom(final byte[] pbBytes)
     throws DeserializationException {
@@ -61,8 +61,8 @@ public abstract class ByteArrayComparable implements Comparable<byte[]> {
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * @return true if and only if the fields of the comparator that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
     if (other == this) return true;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
index 86a2fefae7a..2b21546a72a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
@@ -82,7 +82,7 @@ public class ByteBufferOutputStream extends OutputStream implements ByteBufferWr
   }
 
   /**
-   * This flips the underlying BB so be sure to use it _last_! n
+   * This flips the underlying BB so be sure to use it _last_!
    */
   public ByteBuffer getByteBuffer() {
     curBuf.flip();
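
The "use it _last_" warning above, sketched (the capacity and payload are illustrative):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlipSketch {
      public static void main(String[] args) throws IOException {
        ByteBufferOutputStream bbos = new ByteBufferOutputStream(32);
        bbos.write(Bytes.toBytes("payload"));
        // getByteBuffer() flips the underlying buffer, so call it only after the
        // final write; writing through the stream afterwards would be incorrect.
        ByteBuffer done = bbos.getByteBuffer();
      }
    }
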
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferWriterOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferWriterOutputStream.java
index 0dee9e22ade..9c27d90ec26 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferWriterOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferWriterOutputStream.java
@@ -61,8 +61,8 @@ public class ByteBufferWriterOutputStream extends OutputStream implements ByteBu
    * position of the ByteBuffer.
    * @param b   the ByteBuffer
    * @param off the start offset in the data
-   * @param len the number of bytes to write n * if an I/O error occurs. In particular, an
-   *            IOException is thrown if the output stream is closed.
+   * @param len the number of bytes to write
+   * @throws IOException if an I/O error occurs; in particular, if the output stream is closed
    */
   @Override
   public void write(ByteBuffer b, int off, int len) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 1613bd563d0..d1310137e8c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -38,14 +38,14 @@ public interface CellOutputStream {
    * Implementation must copy the entire state of the Cell. If the written Cell is modified
    * immediately after the write method returns, the modifications must have absolutely no effect on
    * the copy of the Cell that was added in the write.
-   * @param cell Cell to write out n
+   * @param cell Cell to write out
    */
   void write(Cell cell) throws IOException;
 
   /**
    * Let the implementation decide what to do. Usually means writing accumulated data into a byte[]
    * that can then be read from the implementation to be sent to disk, put in the block cache, or
-   * sent over the network. n
+   * sent over the network.
    */
   void flush() throws IOException;
 }
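
A usage sketch of the copy-on-write and flush contract documented above (the helper name is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.io.CellOutputStream;

    public class EncodeAll {
      // The stream copies each cell's entire state on write(), so callers are free
      // to reuse or mutate their Cell objects afterwards; flush() hands the
      // accumulated bytes to wherever the implementation sends them.
      static void encodeAll(CellOutputStream out, Iterable<Cell> cells) throws IOException {
        for (Cell cell : cells) {
          out.write(cell);
        }
        out.flush();
      }
    }
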
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
index da897b04ae1..1ce3a116f1e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
@@ -99,7 +99,7 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
   }
 
   /**
-   * @param b Use passed bytes as backing array for this instance. nn
+   * @param b Use passed bytes as backing array for this instance.
    */
   public void set(final byte[] b, final int offset, final int length) {
     this.bytes = b;
@@ -132,8 +132,7 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
   }
 
   /**
-   * n
-   */
+   * Returns the offset. */
   public int getOffset() {
     return this.offset;
   }
@@ -174,8 +173,9 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
   }
 
   /**
-   * Compares the bytes in this object to the specified byte array n * @return Positive if left is
-   * bigger than right, 0 if they are equal, and negative if left is smaller than right.
+   * Compares the bytes in this object to the specified byte array
+   * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+   *         smaller than right.
    */
   public int compareTo(final byte[] that) {
     return WritableComparator.compareBytes(this.bytes, this.offset, this.length, that, 0,
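
The set()/compareTo() semantics above, sketched (values and class name illustrative):

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IbwSketch {
      public static void main(String[] args) {
        byte[] backing = Bytes.toBytes("xrowkeyx");
        ImmutableBytesWritable ibw = new ImmutableBytesWritable();
        // set() adopts the passed array as the backing store: no copy is made.
        ibw.set(backing, 1, 6); // views "rowkey"
        // 0 when equal, positive when this is bigger, negative when smaller.
        int cmp = ibw.compareTo(Bytes.toBytes("rowkey"));
      }
    }
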
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
index b3c699c0070..8492c23d4da 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
@@ -57,7 +57,7 @@ public class TagCompressionContext {
    * @param out    Stream to which the compressed tags to be written
    * @param in     Source where tags are available
    * @param offset Offset for the tags bytes
-   * @param length Length of all tag bytes n
+   * @param length Length of all tag bytes
    */
   public void compressTags(OutputStream out, byte[] in, int offset, int length) throws IOException {
     int pos = offset;
@@ -76,7 +76,7 @@ public class TagCompressionContext {
    * @param out    Stream to which the compressed tags to be written
    * @param in     Source buffer where tags are available
    * @param offset Offset for the tags byte buffer
-   * @param length Length of all tag bytes n
+   * @param length Length of all tag bytes
    */
   public void compressTags(OutputStream out, ByteBuffer in, int offset, int length)
     throws IOException {
@@ -100,7 +100,7 @@ public class TagCompressionContext {
    * @param src    Stream where the compressed tags are available
    * @param dest   Destination array where to write the uncompressed tags
    * @param offset Offset in destination where tags to be written
-   * @param length Length of all tag bytes n
+   * @param length Length of all tag bytes
    */
   public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
     throws IOException {
@@ -132,7 +132,7 @@ public class TagCompressionContext {
    * @param dest   Destination array where to write the uncompressed tags
    * @param offset Offset in destination where tags to be written
    * @param length Length of all tag bytes
-   * @return bytes count read from source to uncompress all tags. n
+   * @return bytes count read from source to uncompress all tags.
    */
   public int uncompressTags(ByteBuff src, byte[] dest, int offset, int length) throws IOException {
     int srcBeginPos = src.position();
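
The compress/uncompress pair above round-trips as sketched below; context construction is elided, and the writer and reader each keep their own context so their dictionaries stay in step:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.io.TagCompressionContext;

    public class TagRoundTrip {
      // offset/length address the serialized tag bytes inside 'tags'.
      static byte[] roundTrip(TagCompressionContext writeCtx, TagCompressionContext readCtx,
          byte[] tags, int offset, int length) throws IOException {
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        writeCtx.compressTags(compressed, tags, offset, length);
        byte[] restored = new byte[length];
        readCtx.uncompressTags(new ByteArrayInputStream(compressed.toByteArray()),
          restored, 0, length);
        return restored;
      }
    }
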
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
index 09647b4ce91..f0152968162 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
@@ -86,7 +86,7 @@ public abstract class Cipher {
    * @param out     the output stream to wrap
    * @param context the encryption context
    * @param iv      initialization vector
-   * @return the encrypting wrapper n
+   * @return the encrypting wrapper
    */
   public abstract OutputStream createEncryptionStream(OutputStream out, Context context, byte[] iv)
     throws IOException;
@@ -95,7 +95,7 @@ public abstract class Cipher {
    * Create an encrypting output stream given an initialized encryptor
    * @param out       the output stream to wrap
    * @param encryptor the encryptor
-   * @return the encrypting wrapper n
+   * @return the encrypting wrapper
    */
   public abstract OutputStream createEncryptionStream(OutputStream out, Encryptor encryptor)
     throws IOException;
@@ -105,7 +105,7 @@ public abstract class Cipher {
    * @param in      the input stream to wrap
    * @param context the encryption context
    * @param iv      initialization vector
-   * @return the decrypting wrapper n
+   * @return the decrypting wrapper
    */
   public abstract InputStream createDecryptionStream(InputStream in, Context context, byte[] iv)
     throws IOException;
@@ -114,7 +114,7 @@ public abstract class Cipher {
    * Create a decrypting output stream given an initialized decryptor
    * @param in        the input stream to wrap
    * @param decryptor the decryptor
-   * @return the decrypting wrapper n
+   * @return the decrypting wrapper
    */
   public abstract InputStream createDecryptionStream(InputStream in, Decryptor decryptor)
     throws IOException;
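
A sketch of the stream-wrapping contract above, assuming the caller has already prepared the cipher, context and IV (method names are hypothetical):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.hbase.io.crypto.Cipher;
    import org.apache.hadoop.hbase.io.crypto.Context;

    public class CipherStreams {
      static void encryptTo(Cipher cipher, Context context, byte[] iv, byte[] plaintext,
          OutputStream rawOut) throws IOException {
        OutputStream encrypting = cipher.createEncryptionStream(rawOut, context, iv);
        encrypting.write(plaintext);
        encrypting.close(); // finalizes the encryptor state
      }

      static InputStream decryptFrom(Cipher cipher, Context context, byte[] iv, InputStream rawIn)
          throws IOException {
        return cipher.createDecryptionStream(rawIn, context, iv);
      }
    }
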
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
index 0d29fe990b9..93822784594 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface Decryptor {
 
   /**
-   * Set the secret key n
+   * Set the secret key
    */
   public void setKey(Key key);
 
@@ -45,12 +45,12 @@ public interface Decryptor {
   public int getBlockSize();
 
   /**
-   * Set the initialization vector n
+   * Set the initialization vector
    */
   public void setIv(byte[] iv);
 
   /**
-   * Create a stream for decryption n
+   * Create a stream for decryption
    */
   public InputStream createDecryptionStream(InputStream in);
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 761fe04d6fc..13e335b82ee 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -318,7 +318,7 @@ public final class Encryption {
    * <p>
    * The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
    * @param out ciphertext
-   * @param src plaintext nnnn
+   * @param src plaintext
    */
   public static void encrypt(OutputStream out, byte[] src, int offset, int length, Encryptor e)
     throws IOException {
@@ -333,7 +333,7 @@ public final class Encryption {
   /**
    * Encrypt a block of plaintext
    * @param out ciphertext
-   * @param src plaintext nnnnn
+   * @param src plaintext
    */
   public static void encrypt(OutputStream out, byte[] src, int offset, int length, Context context,
     byte[] iv) throws IOException {
@@ -349,7 +349,7 @@ public final class Encryption {
    * <p>
    * The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
    * @param out ciphertext
-   * @param in  plaintext nn
+   * @param in  plaintext
    */
   public static void encrypt(OutputStream out, InputStream in, Encryptor e) throws IOException {
     OutputStream cout = e.createEncryptionStream(out);
@@ -363,7 +363,7 @@ public final class Encryption {
   /**
    * Encrypt a stream of plaintext given a context and IV
    * @param out ciphertext
-   * @param in  plaintet nnn
+   * @param in  plaintext
    */
   public static void encrypt(OutputStream out, InputStream in, Context context, byte[] iv)
     throws IOException {
@@ -378,7 +378,6 @@ public final class Encryption {
    * Decrypt a block of ciphertext read in from a stream with the given cipher and context
    * <p>
    * The decryptor's state will be finalized. It should be reinitialized or returned to the pool.
-   * nnnnnn
    */
   public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize, Decryptor d)
     throws IOException {
@@ -391,7 +390,7 @@ public final class Encryption {
   }
 
   /**
-   * Decrypt a block of ciphertext from a stream given a context and IV nnnnnnn
+   * Decrypt a block of ciphertext from a stream given a context and IV
    */
   public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize,
     Context context, byte[] iv) throws IOException {
@@ -402,7 +401,7 @@ public final class Encryption {
   }
 
   /**
-   * Decrypt a stream of ciphertext given a decryptor nnnnn
+   * Decrypt a stream of ciphertext given a decryptor
    */
   public static void decrypt(OutputStream out, InputStream in, int outLen, Decryptor d)
     throws IOException {
@@ -425,7 +424,7 @@ public final class Encryption {
   }
 
   /**
-   * Decrypt a stream of ciphertext given a context and IV nnnnnn
+   * Decrypt a stream of ciphertext given a context and IV
    */
   public static void decrypt(OutputStream out, InputStream in, int outLen, Context context,
     byte[] iv) throws IOException {
@@ -436,7 +435,8 @@ public final class Encryption {
   }
 
   /**
-   * Resolves a key for the given subject nn * @return a key for the given subject
+   * Resolves a key for the given subject
+   * @return a key for the given subject
    * @throws IOException if the key is not found
    */
   public static Key getSecretKeyForSubject(String subject, Configuration conf) throws IOException {
@@ -460,7 +460,7 @@ public final class Encryption {
    * @param in     plaintext
    * @param conf   configuration
    * @param cipher the encryption algorithm
-   * @param iv     the initialization vector, can be null n
+   * @param iv     the initialization vector, can be null
    */
   public static void encryptWithSubjectKey(OutputStream out, InputStream in, String subject,
     Configuration conf, Cipher cipher, byte[] iv) throws IOException {
@@ -482,7 +482,7 @@ public final class Encryption {
    * @param subject the subject's key alias
    * @param conf    configuration
    * @param cipher  the encryption algorithm
-   * @param iv      the initialization vector, can be null n
+   * @param iv      the initialization vector, can be null
    */
   public static void decryptWithSubjectKey(OutputStream out, InputStream in, int outLen,
     String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException {
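
The two subject-key helpers above mirror each other; a round-trip sketch, with the subject alias, cipher and IV supplied by the caller's key-management setup (class name and payload illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.crypto.Cipher;
    import org.apache.hadoop.hbase.io.crypto.Encryption;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SubjectKeySketch {
      static byte[] roundTrip(String subject, Configuration conf, Cipher cipher, byte[] iv)
          throws IOException {
        byte[] plaintext = Bytes.toBytes("secret");
        ByteArrayOutputStream ciphertext = new ByteArrayOutputStream();
        Encryption.encryptWithSubjectKey(ciphertext, new ByteArrayInputStream(plaintext),
          subject, conf, cipher, iv);
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        Encryption.decryptWithSubjectKey(restored,
          new ByteArrayInputStream(ciphertext.toByteArray()),
          plaintext.length, subject, conf, cipher, iv);
        return restored.toByteArray();
      }
    }
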
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
index f030de3e174..34f0fa4c0f7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface Encryptor {
 
   /**
-   * Set the secret key n
+   * Set the secret key
    */
   public void setKey(Key key);
 
@@ -50,12 +50,12 @@ public interface Encryptor {
   public byte[] getIv();
 
   /**
-   * Set the initialization vector n
+   * Set the initialization vector
    */
   public void setIv(byte[] iv);
 
   /**
-   * Create a stream for encryption n
+   * Create a stream for encryption
    */
   public OutputStream createEncryptionStream(OutputStream out);
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
index 6c6ec5dd759..0852bc7f13f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
@@ -31,13 +31,13 @@ public interface KeyProvider {
   public static final String PASSWORDFILE = "passwordfile";
 
   /**
-   * Initialize the key provider n
+   * Initialize the key provider
    */
   public void init(String params);
 
   /**
-   * Retrieve the key for a given key aliase n * @return the keys corresponding to the supplied
-   * alias, or null if a key is not found
+   * Retrieve the key for a given key alias
+   * @return the key corresponding to the supplied alias, or null if a key is not found
    */
   public Key getKey(String alias);
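
The init()/getKey() lifecycle above, sketched (params and alias are illustrative; a concrete provider is chosen by configuration elsewhere):

    import java.security.Key;
    import org.apache.hadoop.hbase.io.crypto.KeyProvider;

    public class ProviderLookup {
      // init() takes provider-specific params; getKey() resolves an alias and
      // returns null when no key is found for it.
      static Key lookup(KeyProvider provider, String params, String alias) {
        provider.init(params);
        return provider.getKey(alias);
      }
    }
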
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index 4474efd4a31..236eeb2309b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -89,8 +89,10 @@ public interface DataBlockEncoder {
   EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx);
 
   /**
-   * Creates a encoder specific encoding context n * encoding strategy used n * header bytes to be
-   * written, put a dummy header here if the header is unknown n * HFile meta data
+   * Creates an encoder specific encoding context
+   * @param encoding strategy used
+   * @param header   bytes to be written, put a dummy header here if the header is unknown
+   * @param meta     HFile meta data
    * @return a newly created encoding context
    */
   HFileBlockEncodingContext newDataBlockEncodingContext(DataBlockEncoding encoding,
@@ -98,7 +100,8 @@ public interface DataBlockEncoder {
 
   /**
    * Creates an encoder specific decoding context, which will prepare the data before actual
-   * decoding n * HFile meta data
+   * decoding
+   * @param meta HFile meta data
    * @return a newly created decoding context
    */
   HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
index 71808adf75d..47cc9109498 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
@@ -98,7 +98,7 @@ public enum DataBlockEncoding {
   /**
    * Writes id bytes to the given array starting from offset.
    * @param dest   output array
-   * @param offset starting offset of the output array n
+   * @param offset starting offset of the output array
    */
   public void writeIdInBytes(byte[] dest, int offset) throws IOException {
     System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index 8a3b7feb17e..e77738d9f85 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -69,7 +69,7 @@ public class EncodedDataBlock {
   /**
    * Create a buffer which will be encoded using dataBlockEncoder.
    * @param dataBlockEncoder Algorithm used for compression.
-   * @param encoding         encoding type used nn
+   * @param encoding         encoding type used
    */
   public EncodedDataBlock(DataBlockEncoder dataBlockEncoder, DataBlockEncoding encoding,
     byte[] rawKVs, HFileContext meta) {
@@ -174,7 +174,7 @@ public class EncodedDataBlock {
    * @param inputBuffer Array to be compressed.
    * @param offset      Offset to beginning of the data.
    * @param length      Length to be compressed.
-   * @return Size of compressed data in bytes. n
+   * @return Size of compressed data in bytes.
    */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_EXCEPTION",
       justification = "No sure what findbugs wants but looks to me like no NPE")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
index 6835a8bac3c..63f173c38cc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
@@ -32,9 +32,9 @@ public interface HFileBlockDecodingContext {
   /**
    * Perform all actions that need to be done before the encoder's real decoding process.
    * Decompression needs to be done if {@link HFileContext#getCompression()} returns a valid
-   * compression algorithm. n * numBytes after block and encoding headers n * numBytes without
-   * header required to store the block after decompressing (not decoding) n * ByteBuffer pointed
-   * after the header but before the data n * on disk data to be decoded
+   * compression algorithm. The arguments are the numBytes after block and encoding headers, the
+   * numBytes without header required to store the block after decompressing (not decoding), a
+   * ByteBuffer pointed after the header but before the data, and the on disk data to be decoded.
    */
   void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
     ByteBuff blockBufferWithoutHeader, ByteBuff onDiskBlock) throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 375a7a06232..a8bb90d4be7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -93,7 +93,8 @@ public class ThrottledInputStream extends InputStream {
 
   /**
    * Read bytes starting from the specified position. This requires rawStream is an instance of
-   * {@link PositionedReadable}. nnnn * @return the number of bytes read
+   * {@link PositionedReadable}.
+   * @return the number of bytes read
    */
   public int read(long position, byte[] buffer, int offset, int length) throws IOException {
     if (!(rawStream instanceof PositionedReadable)) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
index 157df98a9b0..b1ab8a9b28d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
@@ -80,7 +80,7 @@ public interface Dictionary {
    * @param data   the data to be written in byte[]
    * @param offset the offset
    * @param length length to be written
+   * @param dict   the dictionary whose contents are to be written
+   * @param dict   the dictionary whose contents are to written
    */
   public static void write(OutputStream out, byte[] data, int offset, int length, Dictionary dict)
     throws IOException {
@@ -103,7 +103,7 @@ public interface Dictionary {
    * @param data   the data to be written in ByteBuffer
    * @param offset the offset
    * @param length length to be written
-   * @param dict   the dictionary whose contents are to written n
+   * @param dict   the dictionary whose contents are to be written
    */
   public static void write(OutputStream out, ByteBuffer data, int offset, int length,
     Dictionary dict) throws IOException {
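
A sketch of the static write() helper above; a null dict falls back to a literal write, while with a real dictionary a repeated entry typically compresses to a short index (class and method names are hypothetical):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.io.util.Dictionary;

    public class DictWriteSketch {
      static byte[] writeTwice(byte[] data, Dictionary dict) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        Dictionary.write(out, data, 0, data.length, dict); // likely a literal write
        Dictionary.write(out, data, 0, data.length, dict); // likely a dictionary hit
        return out.toByteArray();
      }
    }
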
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
index 7cfa007478f..97e1e9d3345 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
@@ -118,8 +118,8 @@ public class StreamUtils {
   }
 
   /**
-   * Reads a varInt value stored in an array. n * Input array where the varInt is available n *
-   * Offset in the input array where varInt is available
+   * Reads a varInt value stored in an array, given the input array where the varInt is available
+   * and the offset in that array at which the varInt starts.
    * @return A pair of integers in which first value is the actual decoded varInt value and second
    *         value as number of bytes taken by this varInt for it's storage in the input array.
    * @throws IOException When varint is malformed and not able to be read correctly
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
index 2285917364f..8035bf38bbb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
@@ -84,7 +84,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   public abstract int position();
 
   /**
-   * Sets this ByteBuff's position to the given value. n * @return this object
+   * Sets this ByteBuff's position to the given value.
+   * @return this object
    */
   public abstract ByteBuff position(int position);
 
@@ -110,7 +111,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   public abstract int limit();
 
   /**
-   * Marks the limit of this ByteBuff. n * @return This ByteBuff
+   * Marks the limit of this ByteBuff.
+   * @return This ByteBuff
    */
   public abstract ByteBuff limit(int limit);
 
@@ -200,32 +202,34 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   public abstract byte get();
 
   /**
-   * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the byte at the given index
+   * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
+   * @return the byte at the given index
    */
   public abstract byte get(int index);
 
   /**
    * Fetches the byte at the given offset from current position. Does not change position of the
-   * underlying ByteBuffers. n * @return the byte value at the given index.
+   * underlying ByteBuffers.
+   * @return the byte value at the given index.
    */
   public abstract byte getByteAfterPosition(int offset);
 
   /**
-   * Writes a byte to this ByteBuff at the current position and increments the position n * @return
-   * this object
+   * Writes a byte to this ByteBuff at the current position and increments the position
+   * @return this object
    */
   public abstract ByteBuff put(byte b);
 
   /**
-   * Writes a byte to this ByteBuff at the given index nn * @return this object
+   * Writes a byte to this ByteBuff at the given index
+   * @return this object
    */
   public abstract ByteBuff put(int index, byte b);
 
   /**
    * Copies the specified number of bytes from this ByteBuff's current position to the byte[]'s
-   * offset. Also advances the position of the ByteBuff by the given length. n * @param offset
-   * within the current array
+   * offset. Also advances the position of the ByteBuff by the given length.
+   * @param offset within the current array
    * @param length upto which the bytes to be copied
    */
   public abstract void get(byte[] dst, int offset, int length);
@@ -242,20 +246,22 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
 
   /**
    * Copies the content from this ByteBuff's current position to the byte array and fills it. Also
-   * advances the position of the ByteBuff by the length of the byte[]. n
+   * advances the position of the ByteBuff by the length of the byte[].
    */
   public abstract void get(byte[] dst);
 
   /**
-   * Copies from the given byte[] to this ByteBuff n * @param offset the position in the byte array
-   * from which the copy should be done
+   * Copies from the given byte[] to this ByteBuff
+   * @param offset the position in the byte array from which the copy should be done
    * @param length the length upto which the copy should happen
    * @return this ByteBuff
    */
   public abstract ByteBuff put(byte[] src, int offset, int length);
 
   /**
-   * Copies from the given byte[] to this ByteBuff n * @return this ByteBuff
+   * Copies from the given byte[] to this ByteBuff
+   * @param src source byte array
+   * @return this ByteBuff
    */
   public abstract ByteBuff put(byte[] src);
 
@@ -279,14 +285,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
    * Fetches the short value at the given index. Does not change position of the underlying
    * ByteBuffers. The caller is sure that the index will be after the current position of this
    * ByteBuff. So even if the current short does not fit in the current item we can safely move to
-   * the next item and fetch the remaining bytes forming the short n * @return the short value at
-   * the given index
+   * the next item and fetch the remaining bytes forming the short
+   * @return the short value at the given index
    */
   public abstract short getShort(int index);
 
   /**
    * Fetches the short value at the given offset from current position. Does not change position of
-   * the underlying ByteBuffers. n * @return the short value at the given index.
+   * the underlying ByteBuffers.
+   * @return the short value at the given index.
    */
   public abstract short getShortAfterPosition(int offset);
 
@@ -307,13 +314,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   /**
    * Fetches the int at the given index. Does not change position of the underlying ByteBuffers.
    * Even if the current int does not fit in the current item we can safely move to the next item
-   * and fetch the remaining bytes forming the int n * @return the int value at the given index
+   * and fetch the remaining bytes forming the int
+   * @return the int value at the given index
    */
   public abstract int getInt(int index);
 
   /**
    * Fetches the int value at the given offset from current position. Does not change position of
-   * the underlying ByteBuffers. n * @return the int value at the given index.
+   * the underlying ByteBuffers.
+   * @return the int value at the given index.
    */
   public abstract int getIntAfterPosition(int offset);
 
@@ -335,13 +344,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
    * Fetches the long at the given index. Does not change position of the underlying ByteBuffers.
    * The caller is sure that the index will be after the current position of this ByteBuff. So even
    * if the current long does not fit in the current item we can safely move to the next item and
-   * fetch the remaining bytes forming the long n * @return the long value at the given index
+   * fetch the remaining bytes forming the long
+   * @return the long value at the given index
    */
   public abstract long getLong(int index);
 
   /**
    * Fetches the long value at the given offset from current position. Does not change position of
-   * the underlying ByteBuffers. n * @return the long value at the given index.
+   * the underlying ByteBuffers.
+   * @return the long value at the given index.
    */
   public abstract long getLongAfterPosition(int offset);
 
@@ -354,8 +365,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   }
 
   /**
-   * Copy the content from this ByteBuff to a byte[] based on the given offset and length n * the
-   * position from where the copy should start n * the length upto which the copy has to be done
+   * Copy the content from this ByteBuff to a byte[] based on the given offset and length.
+   * @param offset the position from where the copy should start
+   * @param length the length up to which the copy has to be done
    * @return byte[] with the copied contents from this ByteBuff.
    */
   public abstract byte[] toBytes(int offset, int length);
@@ -380,8 +391,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   public abstract ByteBuff put(int offset, ByteBuff src, int srcOffset, int length);
 
   /**
-   * Reads bytes from the given channel into this ByteBuff n * @return The number of bytes read from
-   * the channel n
+   * Reads bytes from the given channel into this ByteBuff
+   * @return The number of bytes read from the channel
    */
   public abstract int read(ReadableByteChannel channel) throws IOException;
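
For illustration, a minimal sketch of the relative vs. absolute accessors documented above,
assuming SingleByteBuff's single-ByteBuffer constructor (the demo class name is hypothetical):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.nio.ByteBuff;
    import org.apache.hadoop.hbase.nio.SingleByteBuff;

    public class ByteBuffDemo {
      public static void main(String[] args) {
        ByteBuff buf = new SingleByteBuff(ByteBuffer.allocate(16));
        // Relative put: copies the whole array and advances position by its length.
        buf.put(new byte[] { 0, 0, 0, 42 });
        // Absolute get: reads at the given index without moving position.
        System.out.println(buf.getInt(0));  // 42
        System.out.println(buf.position()); // still 4
      }
    }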
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
index 042b21f6c00..106b398ae95 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
@@ -149,8 +149,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the byte at the given index
+   * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
+   * @return the byte at the given index
    */
   @Override
   public byte get(int index) {
@@ -201,8 +201,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Fetches the int at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the int value at the given index
+   * Fetches the int at the given index. Does not change position of the underlying ByteBuffers
+   * @return the int value at the given index
    */
   @Override
   public int getInt(int index) {
@@ -235,8 +235,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Fetches the short at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the short value at the given index
+   * Fetches the short at the given index. Does not change position of the underlying ByteBuffers
+   * @return the short value at the given index
    */
   @Override
   public short getShort(int index) {
@@ -347,8 +347,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Fetches the long at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the long value at the given index
+   * Fetches the long at the given index. Does not change position of the underlying ByteBuffers
+   * @return the long value at the given index
    */
   @Override
   public long getLong(int index) {
@@ -388,7 +388,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Sets this MBB's position to the given value. n * @return this object
+   * Sets this MBB's position to the given value.
+   * @return this object
    */
   @Override
   public MultiByteBuff position(int position) {
@@ -569,7 +570,7 @@ public class MultiByteBuff extends ByteBuff {
 
   /**
    * Copies the content from this MBB's current position to the byte array and fills it. Also
-   * advances the position of the MBB by the length of the byte[]. n
+   * advances the position of the MBB by the length of the byte[].
    */
   @Override
   public void get(byte[] dst) {
@@ -578,8 +579,8 @@ public class MultiByteBuff extends ByteBuff {
 
   /**
    * Copies the specified number of bytes from this MBB's current position to the byte[]'s offset.
-   * Also advances the position of the MBB by the given length. n * @param offset within the current
-   * array
+   * Also advances the position of the MBB by the given length.
+   * @param offset within the current array
    * @param length upto which the bytes to be copied
    */
   @Override
@@ -617,7 +618,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Marks the limit of this MBB. n * @return This MBB
+   * Marks the limit of this MBB.
+   * @return This MBB
    */
   @Override
   public MultiByteBuff limit(int limit) {
@@ -688,8 +690,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Writes a byte to this MBB at the current position and increments the position n * @return this
-   * object
+   * Writes a byte to this MBB at the current position and increments the position
+   * @return this object
    */
   @Override
   public MultiByteBuff put(byte b) {
@@ -882,7 +884,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Copies from the given byte[] to this MBB n * @return this MBB
+   * Copies from the given byte[] to this MBB
+   * @return this MBB
    */
   @Override
   public final MultiByteBuff put(byte[] src) {
@@ -890,8 +893,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Copies from the given byte[] to this MBB n * @param offset the position in the byte array from
-   * which the copy should be done
+   * Copies from the given byte[] to this MBB
+   * @param offset the position in the byte array from which the copy should be done
    * @param length the length upto which the copy should happen
    * @return this MBB
    */
@@ -969,7 +972,7 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Jumps the current position of this MBB by specified length. n
+   * Jumps the current position of this MBB by specified length.
    */
   @Override
   public MultiByteBuff skip(int length) {
@@ -991,7 +994,7 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Jumps back the current position of this MBB by specified length. n
+   * Jumps back the current position of this MBB by specified length.
    */
   @Override
   public MultiByteBuff moveBack(int length) {
@@ -1118,8 +1121,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Copy the content from this MBB to a byte[] based on the given offset and length n * the
-   * position from where the copy should start n * the length upto which the copy has to be done
+   * Copy the content from this MBB to a byte[] based on the given offset and length.
+   * @param offset the position from where the copy should start
+   * @param length the length up to which the copy has to be done
    * @return byte[] with the copied contents from this MBB.
    */
   @Override
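
A small sketch of the cross-fragment reads the javadoc above describes: the int at index 2
straddles the two backing ByteBuffers, yet the absolute getInt still assembles it (demo class
name is hypothetical):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.nio.MultiByteBuff;

    public class MultiByteBuffDemo {
      public static void main(String[] args) {
        ByteBuffer b1 = ByteBuffer.wrap(new byte[] { 9, 9, 0, 0 });
        ByteBuffer b2 = ByteBuffer.wrap(new byte[] { 0, 7, 9, 9 });
        MultiByteBuff mbb = new MultiByteBuff(b1, b2);
        // Index 2 spans items: bytes 0,0,0,7 are stitched into one big-endian int.
        System.out.println(mbb.getInt(2));   // 7
        // The absolute read leaves the position untouched.
        System.out.println(mbb.position());  // 0
      }
    }
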
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
index e7d37069ac5..9714c962e12 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
@@ -167,7 +167,8 @@ public abstract class User {
   }
 
   /**
-   * Executes the given action as the login user n * @return the result of the action n
+   * Executes the given action as the login user
+   * @return the result of the action
    */
   @SuppressWarnings({ "rawtypes", "unchecked" })
   public static <T> T runAsLoginUser(PrivilegedExceptionAction<T> action) throws IOException {
@@ -183,7 +184,7 @@ public abstract class User {
 
   /**
    * Wraps an underlying {@code UserGroupInformation} instance.
-   * @param ugi The base Hadoop user n
+   * @param ugi The base Hadoop user
    */
   public static User create(UserGroupInformation ugi) {
     if (ugi == null) {
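
A usage sketch for runAsLoginUser, whose contract is fixed above (the demo class name and the
printed detail are illustrative only):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.hbase.security.User;

    public class RunAsLoginUserDemo {
      public static void main(String[] args) throws IOException {
        // The action runs as the login user; its result is handed back to the caller.
        String name = User.runAsLoginUser(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            return User.getCurrent().getShortName();
          }
        });
        System.out.println(name);
      }
    }
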
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
index f6766bd1f3c..fd88fb19308 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
@@ -181,7 +181,7 @@ public class UserProvider extends BaseConfigurable {
 
   /**
    * Wraps an underlying {@code UserGroupInformation} instance.
-   * @param ugi The base Hadoop user n
+   * @param ugi The base Hadoop user
    */
   public User create(UserGroupInformation ugi) {
     if (ugi == null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index 179074ef00c..88ee9c9666a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -69,7 +69,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange
 
   /**
    * Update the beginning of this range. {@code offset + length} may not be greater than
-   * {@code bytes.length}. Resets {@code position} to 0. n * the new start of this range.
+   * {@code bytes.length}. Resets {@code position} to 0.
+   * @param offset the new start of this range.
    * @return this.
    */
   @Override
@@ -82,7 +82,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange
   /**
    * Update the length of this range. {@code offset + length} should not be greater than
    * {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
-   * {@code position} to {@code length}. n * The new length of this range.
+   * {@code position} to {@code length}.
+   * @param length the new length of this range.
    * @return this.
    */
   @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
index d502bf7bb8d..6c92ac1aac4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
@@ -573,7 +573,7 @@ public final class ByteBufferUtils {
 
   /**
    * Read integer from stream coded in 7 bits and increment position.
-   * @return the integer that has been read n
+   * @return the integer that has been read
    */
   public static int readCompressedInt(InputStream input) throws IOException {
     int result = 0;
@@ -606,7 +606,7 @@ public final class ByteBufferUtils {
   /**
    * Read long which was written to fitInBytes bytes and increment position.
    * @param fitInBytes In how many bytes given long is stored.
-   * @return The value of parsed long. n
+   * @return The value of parsed long.
    */
   public static long readLong(InputStream in, final int fitInBytes) throws IOException {
     long tmpLong = 0;
@@ -684,7 +684,7 @@ public final class ByteBufferUtils {
 
   /**
    * Copy from one buffer to another from given offset. This will be absolute positional copying and
-   * won't affect the position of any of the buffers. nnnnn
+   * won't affect the position of any of the buffers.
    */
   public static void copyFromBufferToBuffer(ByteBuffer in, ByteBuffer out, int sourceOffset,
     int destinationOffset, int length) {
@@ -854,8 +854,8 @@ public final class ByteBufferUtils {
   }
 
   /**
-   * Copy the given number of bytes from specified offset into a new byte[] nnn * @return a new
-   * byte[] containing the bytes in the specified range
+   * Copy the given number of bytes from specified offset into a new byte[]
+   * @return a new byte[] containing the bytes in the specified range
    */
   public static byte[] toBytes(ByteBuffer buffer, int offset, int length) {
     byte[] output = new byte[length];
@@ -878,7 +878,7 @@ public final class ByteBufferUtils {
   }
 
   /**
-   * n * ByteBuffer to hash n * offset to start from n * length to hash
+   * @param buf    ByteBuffer to hash
+   * @param offset offset to start from
+   * @param length length to hash
    */
   public static int hashCode(ByteBuffer buf, int offset, int length) {
     int hash = 1;
@@ -960,7 +960,8 @@ public final class ByteBufferUtils {
   }
 
   /**
-   * Reads a short value at the given buffer's offset. nn * @return short value at offset
+   * Reads a short value at the given buffer's offset.
+   * @return short value at offset
    */
   public static short toShort(ByteBuffer buffer, int offset) {
     return ConverterHolder.BEST_CONVERTER.toShort(buffer, offset);
@@ -974,7 +975,8 @@ public final class ByteBufferUtils {
   }
 
   /**
-   * Reads an int value at the given buffer's offset. nn * @return int value at offset
+   * Reads an int value at the given buffer's offset.
+   * @return int value at offset
    */
   public static int toInt(ByteBuffer buffer, int offset) {
     return ConverterHolder.BEST_CONVERTER.toInt(buffer, offset);
@@ -985,7 +987,7 @@ public final class ByteBufferUtils {
    * @param buf    The ByteBuffer
    * @param offset Offset to int value
    * @param length Number of bytes used to store the int value.
-   * @return the int value n * if there's not enough bytes left in the buffer after the given offset
+   * @return the int value
+   * @throws IllegalArgumentException if there's not enough bytes left in the buffer after the
+   *           given offset
    */
   public static int readAsInt(ByteBuffer buf, int offset, final int length) {
     if (offset + length > buf.limit()) {
@@ -1001,7 +1003,8 @@ public final class ByteBufferUtils {
   }
 
   /**
-   * Reads a long value at the given buffer's offset. nn * @return long value at offset
+   * Reads a long value at the given buffer's offset.
+   * @return long value at offset
    */
   public static long toLong(ByteBuffer buffer, int offset) {
     return ConverterHolder.BEST_CONVERTER.toLong(buffer, offset);
@@ -1022,7 +1025,8 @@ public final class ByteBufferUtils {
   }
 
   /**
-   * Reads a double value at the given buffer's offset. n * @param offset offset where double is
+   * Reads a double value at the given buffer's offset.
+   * @param offset offset where double is
    * @return double value at offset
    */
   public static double toDouble(ByteBuffer buffer, int offset) {
@@ -1030,7 +1034,8 @@ public final class ByteBufferUtils {
   }
 
   /**
-   * Reads a BigDecimal value at the given buffer's offset. nn * @return BigDecimal value at offset
+   * Reads a BigDecimal value at the given buffer's offset.
+   * @return BigDecimal value at offset
    */
   public static BigDecimal toBigDecimal(ByteBuffer buffer, int offset, int length) {
     if (buffer == null || length < Bytes.SIZEOF_INT + 1 || (offset + length > buffer.limit())) {
@@ -1081,7 +1086,6 @@ public final class ByteBufferUtils {
   /**
    * Copies the bytes from given array's offset to length part into the given buffer. Puts the bytes
    * to buffer's current position. This also advances the position in the 'out' buffer by 'length'
-   * nnnn
    */
   public static void copyFromArrayToBuffer(ByteBuffer out, byte[] in, int inOffset, int length) {
     if (out.hasArray()) {
@@ -1099,7 +1103,7 @@ public final class ByteBufferUtils {
 
   /**
    * Copies bytes from given array's offset to length part into the given buffer. Puts the bytes to
-   * buffer's given position. This doesn't affact the position of buffer. nnnn
+   * buffer's given position. This doesn't affect the position of buffer.
    */
   public static void copyFromArrayToBuffer(ByteBuffer out, int outOffset, byte[] in, int inOffset,
     int length) {
@@ -1116,7 +1120,7 @@ public final class ByteBufferUtils {
 
   /**
    * Copies specified number of bytes from given offset of 'in' ByteBuffer to the array. This
-   * doesn't affact the position of buffer. nnnnn
+   * doesn't affect the position of buffer.
    */
   public static void copyFromBufferToArray(byte[] out, ByteBuffer in, int sourceOffset,
     int destinationOffset, int length) {
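
A minimal sketch of the absolute, position-preserving copies described above (demo class name
is hypothetical):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.util.ByteBufferUtils;

    public class AbsoluteCopyDemo {
      public static void main(String[] args) {
        ByteBuffer in = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5 });
        ByteBuffer out = ByteBuffer.allocate(5);
        // Copy 3 bytes from in[1..3] to out[0..2]; neither buffer's position moves.
        ByteBufferUtils.copyFromBufferToBuffer(in, out, 1, 0, 3);
        System.out.println(in.position() + " " + out.position()); // 0 0
        System.out.println(out.get(0)); // 2
      }
    }
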
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
index 64bd5cb3b6c..4addf9057e2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
@@ -63,14 +63,13 @@ public interface ByteRange extends Comparable<ByteRange> {
 
   /**
    * Nullifies this ByteRange. That is, it becomes a husk, being a range over no byte[] whatsoever.
-   * n
    */
   public ByteRange unset();
 
   /**
    * Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
    * is set to {@code capacity}.
-   * @param capacity the size of a new byte[]. n
+   * @param capacity the size of a new byte[].
    */
   public ByteRange set(int capacity);
 
@@ -78,7 +77,7 @@ public interface ByteRange extends Comparable<ByteRange> {
    * Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
    * is set to {@code bytes.length}. A null {@code bytes} IS supported, in which case this method
    * will behave equivalently to {@link #unset()}.
-   * @param bytes the array to wrap. n
+   * @param bytes the array to wrap.
    */
   public ByteRange set(byte[] bytes);
 
@@ -188,21 +187,21 @@ public interface ByteRange extends Comparable<ByteRange> {
   /**
    * Store the short value at {@code index}
    * @param index the index in the range where {@code val} is stored
-   * @param val   the value to store n
+   * @param val   the value to store
    */
   public ByteRange putShort(int index, short val);
 
   /**
    * Store the int value at {@code index}
    * @param index the index in the range where {@code val} is stored
-   * @param val   the value to store n
+   * @param val   the value to store
    */
   public ByteRange putInt(int index, int val);
 
   /**
    * Store the long value at {@code index}
    * @param index the index in the range where {@code val} is stored
-   * @param val   the value to store n
+   * @param val   the value to store
    */
   public ByteRange putLong(int index, long val);
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index 1955aecf433..6c83d70d19b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -207,7 +207,7 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * @param b Use passed bytes as backing array for this instance. nn
+   * @param b Use passed bytes as backing array for this instance.
    */
   public void set(final byte[] b, final int offset, final int length) {
     this.bytes = b;
@@ -240,8 +240,7 @@ public class Bytes implements Comparable<Bytes> {
   }
 
-  /**
-   * n
-   */
+  /** Returns the offset */
   public int getOffset() {
     return this.offset;
   }
@@ -272,8 +271,9 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * Compares the bytes in this object to the specified byte array n * @return Positive if left is
-   * bigger than right, 0 if they are equal, and negative if left is smaller than right.
+   * Compares the bytes in this object to the specified byte array
+   * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+   *         smaller than right.
    */
   public int compareTo(final byte[] that) {
     return BYTES_RAWCOMPARATOR.compare(this.bytes, this.offset, this.length, that, 0, that.length);
@@ -589,7 +589,8 @@ public class Bytes implements Comparable<Bytes> {
 
   /**
    * Write a printable representation of a byte array.
-   * @param b byte array n * @see #toStringBinary(byte[], int, int)
+   * @param b byte array
+   * @see #toStringBinary(byte[], int, int)
    */
   public static String toStringBinary(final byte[] b) {
     if (b == null) return "null";
@@ -1143,7 +1144,8 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * Convert a BigDecimal value to a byte array n * @return the byte array
+   * Convert a BigDecimal value to a byte array
+   * @return the byte array
    */
   public static byte[] toBytes(BigDecimal val) {
     byte[] valueBytes = val.unscaledValue().toByteArray();
@@ -1154,14 +1156,16 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * Converts a byte array to a BigDecimal n * @return the char value
+   * Converts a byte array to a BigDecimal
+   * @return the BigDecimal value
    */
   public static BigDecimal toBigDecimal(byte[] bytes) {
     return toBigDecimal(bytes, 0, bytes.length);
   }
 
   /**
-   * Converts a byte array to a BigDecimal value nnn * @return the char value
+   * Converts a byte array to a BigDecimal value
+   * @return the BigDecimal value
    */
   public static BigDecimal toBigDecimal(byte[] bytes, int offset, final int length) {
     if (bytes == null || length < SIZEOF_INT + 1 || (offset + length > bytes.length)) {
@@ -2244,7 +2248,7 @@ public class Bytes implements Comparable<Bytes> {
    * Copy the byte array given in parameter and return an instance of a new byte array with the same
    * length and the same content.
    * @param bytes the byte array to copy from
-   * @return a copy of the given designated byte array nn
+   * @return a copy of the given designated byte array
    */
   public static byte[] copy(byte[] bytes, final int offset, final int length) {
     if (bytes == null) return null;
@@ -2423,7 +2427,7 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * Fill given array with zeros at the specified position. nnn
+   * Fill given array with zeros at the specified position.
    */
   public static void zero(byte[] b, int offset, int length) {
     checkPositionIndex(offset, b.length, "offset");
@@ -2506,7 +2510,8 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * Create a byte array which is multiple given bytes nn * @return byte array
+   * Create a byte array by repeating the given bytes {@code multiNum} times
+   * @return byte array
    */
   public static byte[] multiple(byte[] srcBytes, int multiNum) {
     if (multiNum <= 0) {
@@ -2561,7 +2566,7 @@ public class Bytes implements Comparable<Bytes> {
 
   /**
    * Create a byte array from a string of hash digits. The length of the string must be a multiple
-   * of 2 n
+   * of 2
    */
   public static byte[] fromHex(String hex) {
     checkArgument(hex.length() % 2 == 0, "length must be a multiple of 2");
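
A small round-trip sketch for the BigDecimal and hex helpers touched above (demo class name is
hypothetical):

    import java.math.BigDecimal;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BytesDemo {
      public static void main(String[] args) {
        // BigDecimal round trip: the scale is encoded ahead of the unscaled value.
        byte[] b = Bytes.toBytes(new BigDecimal("12.34"));
        System.out.println(Bytes.toBigDecimal(b)); // 12.34
        // fromHex requires an even-length string.
        System.out.println(Bytes.toStringBinary(Bytes.fromHex("0a0b")));
      }
    }
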
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
index d943803fb2f..dc810834a66 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
@@ -85,8 +85,8 @@ public enum ChecksumType {
   }
 
   /**
-   * Cannot rely on enum ordinals . They change if item is removed or moved. Do our own codes. n
-   * * @return Type associated with passed code.
+   * Cannot rely on enum ordinals. They change if an item is removed or moved. Do our own codes.
+   * @return Type associated with passed code.
    */
   public static ChecksumType codeToType(final byte b) {
     for (ChecksumType t : ChecksumType.values()) {
@@ -98,8 +98,8 @@ public enum ChecksumType {
   }
 
   /**
-   * Map a checksum name to a specific type. Do our own names. n * @return Type associated with
-   * passed code.
+   * Map a checksum name to a specific type. Do our own names.
+   * @return Type associated with passed code.
    */
   public static ChecksumType nameToType(final String name) {
     for (ChecksumType t : ChecksumType.values()) {
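
A sketch of the code/name lookups, which the javadoc stresses are stable even if enum ordinals
shift (assumes the CRC32C entry keeps that name):

    import org.apache.hadoop.hbase.util.ChecksumType;

    public class ChecksumTypeDemo {
      public static void main(String[] args) {
        ChecksumType t = ChecksumType.nameToType("CRC32C");
        // The code round-trips back to the same enum constant.
        System.out.println(ChecksumType.codeToType(t.getCode()) == t); // true
      }
    }
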
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
index 84e70873727..1b3eef180a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
@@ -27,10 +27,9 @@ public class Classes {
 
   /**
    * Equivalent of {@link Class#forName(String)} which also returns classes for primitives like
-   * <code>boolean</code>, etc. n * The name of the class to retrieve. Can be either a normal class
-   * or a primitive class.
-   * @return The class specified by <code>className</code> n * If the requested class can not be
-   *         found.
+   * <code>boolean</code>, etc.
+   * @param className The name of the class to retrieve. Can be either a normal class or a
+   *                  primitive class.
+   * @return The class specified by <code>className</code>
+   * @throws ClassNotFoundException If the requested class can not be found.
    */
   public static Class<?> extendedForName(String className) throws ClassNotFoundException {
     Class<?> valueType;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index fb33e6d6897..e9047cd2c2f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -330,7 +330,7 @@ public final class CommonFSUtils {
    * Returns the URI in the string format
    * @param c configuration
    * @param p path
-   * @return - the URI's to string format n
+   * @return - the URI's to string format
    */
   public static String getDirUri(final Configuration c, Path p) throws IOException {
     if (p.toUri().getScheme() != null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
index bfd96adb8db..d2a6fce9d19 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
@@ -215,7 +215,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
    * @param parent     the parent class loader for exempted classes
    * @param pathPrefix a prefix used in temp path name to store the jar file locally
    * @param conf       the configuration used to create the class loader, if needed
-   * @return a CoprocessorClassLoader for the coprocessor jar path n
+   * @return a CoprocessorClassLoader for the coprocessor jar path
    */
   public static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent,
     final String pathPrefix, final Configuration conf) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashKey.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashKey.java
index e4e89f7711f..642afe43932 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashKey.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashKey.java
@@ -31,9 +31,7 @@ public abstract class HashKey<T> {
     this.t = t;
   }
 
-  /**
-   * n * @return The byte at the given position in this HashKey
-   */
+  /** Returns The byte at the given position in this HashKey */
   public abstract byte get(int pos);
 
   /** Returns The number of bytes in this HashKey */
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
index 276e436ed13..0cd1b41c502 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
@@ -58,7 +58,7 @@ public class KeyLocker<K> {
     }, NB_CONCURRENT_LOCKS);
 
   /**
-   * Return a lock for the given key. The lock is already locked. n
+   * Return a lock for the given key. The lock is already locked.
    */
   public ReentrantLock acquireLock(K key) {
     if (key == null) throw new IllegalArgumentException("key must not be null");
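
The acquireLock contract above (the lock is returned already held) implies the usual try/finally
shape; a minimal sketch:

    import java.util.concurrent.locks.ReentrantLock;
    import org.apache.hadoop.hbase.util.KeyLocker;

    public class KeyLockerDemo {
      public static void main(String[] args) {
        KeyLocker<String> locker = new KeyLocker<>();
        // The returned lock is already locked for this key.
        ReentrantLock lock = locker.acquireLock("region-A");
        try {
          // critical section guarded per key
        } finally {
          lock.unlock();
        }
      }
    }
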
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
index 8202c96b289..ee6b715db1f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
@@ -32,8 +32,8 @@ public class MD5Hash {
   private static final Logger LOG = LoggerFactory.getLogger(MD5Hash.class);
 
   /**
-   * Given a byte array, returns in MD5 hash as a hex string. n * @return SHA1 hash as a 32
-   * character hex string.
+   * Given a byte array, returns its MD5 hash as a hex string.
+   * @return MD5 hash as a 32 character hex string.
    */
   public static String getMD5AsHex(byte[] key) {
     return getMD5AsHex(key, 0, key.length);
@@ -42,8 +42,8 @@ public class MD5Hash {
   /**
    * Given a byte array, returns its MD5 hash as a hex string. Only "length" number of bytes
    * starting at "offset" within the byte array are used.
-   * @param key the key to hash (variable length byte array) nn * @return MD5 hash as a 32 character
-   *            hex string.
+   * @param key the key to hash (variable length byte array)
+   * @return MD5 hash as a 32 character hex string.
    */
   public static String getMD5AsHex(byte[] key, int offset, int length) {
     try {
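
A quick sketch of the two getMD5AsHex variants fixed above (demo class name hypothetical):

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.MD5Hash;

    public class MD5HashDemo {
      public static void main(String[] args) {
        byte[] key = Bytes.toBytes("row-key");
        // Whole-array and ranged forms yield the same 32-character hex digest here.
        System.out.println(MD5Hash.getMD5AsHex(key));
        System.out.println(MD5Hash.getMD5AsHex(key, 0, key.length));
      }
    }
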
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
index 2d55d572ff6..01986e64233 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
@@ -76,14 +76,14 @@ public class Pair<T1, T2> implements Serializable {
   }
 
   /**
-   * Return the first element stored in the pair. n
+   * Return the first element stored in the pair.
    */
   public T1 getFirst() {
     return first;
   }
 
   /**
-   * Return the second element stored in the pair. n
+   * Return the second element stored in the pair.
    */
   public T2 getSecond() {
     return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
index 745bd759c76..759f3da4409 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
@@ -43,14 +43,14 @@ public class PairOfSameType<T> implements Iterable<T> {
   }
 
   /**
-   * Return the first element stored in the pair. n
+   * Return the first element stored in the pair.
    */
   public T getFirst() {
     return first;
   }
 
   /**
-   * Return the second element stored in the pair. n
+   * Return the second element stored in the pair.
    */
   public T getSecond() {
     return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
index efa52612be6..cb61cfbe246 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
@@ -156,12 +156,12 @@ public interface PositionedByteRange extends ByteRange {
   public PositionedByteRange put(byte[] val, int offset, int length);
 
   /**
-   * Limits the byte range upto a specified value. Limit cannot be greater than capacity nn
+   * Limits the byte range upto a specified value. Limit cannot be greater than capacity
    */
   public PositionedByteRange setLimit(int limit);
 
   /**
-   * Return the current limit n
+   * Return the current limit
    */
   public int getLimit();
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index ce168dc60c9..d5b3f894a7f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -69,8 +69,8 @@ public final class PrettyPrinter {
 
   /**
    * Convert a human readable string to its value.
-   * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit) nn * @return the value
-   *      corresponding to the human readable string
+   * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
+   * @param pretty the human readable value
+   * @param unit   the unit of the value
+   * @return the value corresponding to the human readable string
    */
   public static String valueOf(final String pretty, final Unit unit) throws HBaseException {
     StringBuilder value = new StringBuilder();
@@ -144,7 +144,8 @@ public final class PrettyPrinter {
    * Convert a human readable time interval to seconds. Examples of the human readable time
    * intervals are: 50 DAYS 1 HOUR 30 MINUTES , 25000 SECONDS etc. The units of time specified can
    * be in uppercase as well as lowercase. Also, if a single number is specified without any time
-   * unit, it is assumed to be in seconds. n * @return value in seconds
+   * unit, it is assumed to be in seconds.
+   * @return value in seconds
    */
   private static long humanReadableIntervalToSec(final String humanReadableInterval)
     throws HBaseException {
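
A sketch of the interval parsing described above, assuming PrettyPrinter.Unit.TIME_INTERVAL as
the unit (demo class name hypothetical):

    import org.apache.hadoop.hbase.exceptions.HBaseException;
    import org.apache.hadoop.hbase.util.PrettyPrinter;

    public class IntervalDemo {
      public static void main(String[] args) throws HBaseException {
        // Mixed units collapse to seconds; a bare number is already seconds.
        System.out.println(
          PrettyPrinter.valueOf("1 HOUR 30 MINUTES", PrettyPrinter.Unit.TIME_INTERVAL)); // 5400
      }
    }
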
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
index 24b9f2d997b..868c731e0a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
@@ -66,22 +66,22 @@ public class SimpleMutableByteRange extends AbstractByteRange {
 
   /**
    * Create a new {@code ByteRange} over a new backing array of size {@code capacity}. The range's
-   * offset and length are 0 and {@code capacity}, respectively. n * the size of the backing array.
+   * offset and length are 0 and {@code capacity}, respectively.
+   * @param capacity the size of the backing array.
    */
   public SimpleMutableByteRange(int capacity) {
     this(new byte[capacity]);
   }
 
   /**
-   * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap.
+   * Create a new {@code ByteRange} over the provided {@code bytes}.
+   * @param bytes The array to wrap.
    */
   public SimpleMutableByteRange(byte[] bytes) {
     set(bytes);
   }
 
   /**
-   * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap. n * The
-   * offset into {@code bytes} considered the beginning of this range. n * The length of this range.
+   * Create a new {@code ByteRange} over the provided {@code bytes}.
+   * @param bytes  The array to wrap.
+   * @param offset The offset into {@code bytes} considered the beginning of this range.
+   * @param length The length of this range.
    */
   public SimpleMutableByteRange(byte[] bytes, int offset, int length) {
     set(bytes, offset, length);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
index d91fd712f37..68e99c3053b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
@@ -70,7 +70,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
 
   /**
    * Create a new {@code PositionedByteRange} over a new backing array of size {@code capacity}. The
-   * range's offset and length are 0 and {@code capacity}, respectively. n * the size of the backing
+   * range's offset and length are 0 and {@code capacity}, respectively. {@code capacity} is the size of the backing
    * array.
    */
   public SimplePositionedMutableByteRange(int capacity) {
@@ -78,17 +78,15 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
   }
 
   /**
-   * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
-   * wrap.
+   * Create a new {@code PositionedByteRange} over the provided {@code bytes}.
+   * @param bytes The array to wrap.
    */
   public SimplePositionedMutableByteRange(byte[] bytes) {
     set(bytes);
   }
 
   /**
-   * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
-   * wrap. n * The offset into {@code bytes} considered the beginning of this range. n * The length
-   * of this range.
+   * Create a new {@code PositionedByteRange} over the provided {@code bytes}.
+   * @param bytes  The array to wrap.
+   * @param offset The offset into {@code bytes} considered the beginning of this range.
+   * @param length The length of this range.
    */
   public SimplePositionedMutableByteRange(byte[] bytes, int offset, int length) {
     set(bytes, offset, length);
@@ -130,7 +128,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
 
   /**
    * Update the beginning of this range. {@code offset + length} may not be greater than
-   * {@code bytes.length}. Resets {@code position} to 0. n * the new start of this range.
+   * {@code bytes.length}. Resets {@code position} to 0.
+   * @param offset the new start of this range.
    * @return this.
    */
   @Override
@@ -143,7 +141,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
   /**
    * Update the length of this range. {@code offset + length} should not be greater than
    * {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
-   * {@code position} to {@code length}. n * The new length of this range.
+   * {@code position} to {@code length}.
+   * @param length The new length of this range.
    * @return this.
    */
   @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
index 3fb1a0896fa..b1162fdc992 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
@@ -110,7 +110,7 @@ public class Threads {
   }
 
   /**
-   * @param t Waits on the passed thread to die dumping a threaddump every minute while its up. n
+   * @param t Waits on the passed thread to die, dumping a threaddump every minute while it's up.
    */
   public static void threadDumpingIsAlive(final Thread t) throws InterruptedException {
     if (t == null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
index 0caecf649ce..e23c62045fa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface TimeMeasurable<T> {
 
   /**
-   * Measure elapsed time. n
+   * Measure elapsed time.
    */
   T measure();
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
index 40bd7baa4b2..a483fd326b6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
@@ -193,7 +193,7 @@ public final class UnsafeAccess {
 
   /**
    * Reads a int value at the given Object's offset considering it was written in big-endian format.
-   * nn * @return int value at offset
+   * @return int value at offset
    */
   public static int toInt(Object ref, long offset) {
     if (LITTLE_ENDIAN) {
@@ -295,7 +295,7 @@ public final class UnsafeAccess {
 
   // APIs to copy data. This will be direct memory location copy and will be much faster
   /**
-   * Copies the bytes from given array's offset to length part into the given buffer. nnnnn
+   * Copies the bytes from given array's offset to length part into the given buffer.
    */
   public static void copy(byte[] src, int srcOffset, ByteBuffer dest, int destOffset, int length) {
     long destAddress = destOffset;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
index b79924a352c..1fe83ad6438 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
@@ -73,7 +73,7 @@ public class WindowMovingAverage extends MovingAverage {
 
   /**
    * Get statistics at index.
-   * @param index index of bar n
+   * @param index index of bar
    */
   protected long getStatisticsAtIndex(int index) {
     if (index < 0 || index >= getNumberOfStatistics()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
index fc152be962c..6eb87a93628 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
@@ -102,8 +102,8 @@ public final class ZKConfig {
   }
 
   /**
-   * Return the ZK Quorum servers string given the specified configuration n * @return Quorum
-   * servers String
+   * Return the ZK Quorum servers string given the specified configuration
+   * @return Quorum servers String
    */
   private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) {
     String defaultClientPort = Integer.toString(
@@ -165,8 +165,8 @@ public final class ZKConfig {
 
   /**
    * Separate the given key into the three configurations it should contain: hbase.zookeeper.quorum,
-   * hbase.zookeeper.client.port and zookeeper.znode.parent n * @return the three configuration in
-   * the described order n
+   * hbase.zookeeper.client.port and zookeeper.znode.parent
+   * @return the three configurations in the described order
    */
   public static ZKClusterKey transformClusterKey(String key) throws IOException {
     String[] parts = key.split(":");
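
A sketch of transformClusterKey splitting a cluster key into its three parts (the ZKClusterKey
getter names are assumed here):

    import java.io.IOException;
    import org.apache.hadoop.hbase.zookeeper.ZKConfig;

    public class ClusterKeyDemo {
      public static void main(String[] args) throws IOException {
        // A cluster key is quorum:clientPort:znodeParent.
        ZKConfig.ZKClusterKey k = ZKConfig.transformClusterKey("zk1,zk2,zk3:2181:/hbase");
        System.out.println(k.getQuorumString()); // zk1,zk2,zk3
        System.out.println(k.getClientPort());   // 2181
        System.out.println(k.getZnodeParent());  // /hbase
      }
    }
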
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 37c6831bfca..51698559218 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -249,7 +249,7 @@ public class TestHBaseConfiguration {
     }
 
     /**
-     * Wrapper to fetch the configured {@code List<CredentialProvider>}s. n * Configuration with
+     * Wrapper to fetch the configured {@code List<CredentialProvider>}s, given a Configuration with
      * GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS defined
      * @return List of CredentialProviders, or null if they could not be loaded
      */
@@ -274,8 +274,8 @@ public class TestHBaseConfiguration {
 
     /**
      * Create a CredentialEntry using the configured Providers. If multiple CredentialProviders are
-     * configured, the first will be used. n * Configuration for the CredentialProvider n *
-     * CredentialEntry name (alias) n * The credential
+     * configured, the first will be used.
+     * @param conf       Configuration for the CredentialProvider
+     * @param name       CredentialEntry name (alias)
+     * @param credential The credential
      */
     public void createEntry(Configuration conf, String name, char[] credential) throws Exception {
       if (!isHadoopCredentialProviderAvailable()) {
@@ -294,8 +294,8 @@ public class TestHBaseConfiguration {
 
     /**
      * Create a CredentialEntry with the give name and credential in the credentialProvider. The
-     * credentialProvider argument must be an instance of Hadoop CredentialProvider. n * Instance of
-     * CredentialProvider n * CredentialEntry name (alias) n * The credential to store
+     * credentialProvider argument must be an instance of Hadoop CredentialProvider.
+     * @param credentialProvider Instance of CredentialProvider
+     * @param name               CredentialEntry name (alias)
+     * @param credential         The credential to store
      */
     private void createEntryInProvider(Object credentialProvider, String name, char[] credential)
       throws Exception {
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 2bb72bef1ae..56b92056a02 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -54,7 +54,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   void updatePut(long t);
 
   /**
-   * Update the PutBatch time histogram if a batch contains a Put op n
+   * Update the PutBatch time histogram if a batch contains a Put op
    */
   void updatePutBatch(long t);
 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
index 1de3a34bfeb..05f967731a5 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
@@ -80,7 +80,7 @@ public interface MetricsRESTSource extends BaseSource, JvmPauseMonitorSource {
   void incrementSucessfulPutRequests(int inc);
 
   /**
-   * Increment the number of successful Delete requests. n
+   * Increment the number of successful Delete requests.
    */
   void incrementSucessfulDeleteRequests(int inc);
 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
index 214626204e7..3d938bdd539 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
@@ -36,7 +36,7 @@ public interface MetricsThriftServerSource extends ExceptionTrackingSource, JvmP
   String ACTIVE_WORKER_COUNT_KEY = "numActiveWorkers";
 
   /**
-   * Add how long an operation was in the queue. n
+   * Add how long an operation was in the queue.
    */
   void incTimeInQueue(long time);
 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
index 66d5e3d87a4..0581d431ebf 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
@@ -41,7 +41,7 @@ public interface MetricHistogram {
   String NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME = "_99.9th_percentile";
 
   /**
-   * Add a single value to a histogram's stream of values. n
+   * Add a single value to a histogram's stream of values.
    */
   void add(long value);
 
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
index cf01b099bb8..d4cc4ba6f4d 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
@@ -67,7 +67,7 @@ public class MetricSampleQuantiles {
   /**
    * Specifies the allowable error for this rank, depending on which quantiles are being targeted.
    * This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this
-   * rank can be. n * the index in the list of samples
+   * rank can be.
+   * @param rank the index in the list of samples
    */
   private double allowableError(int rank) {
     int size = samples.size();
@@ -207,7 +207,7 @@ public class MetricSampleQuantiles {
 
   /**
    * Get a snapshot of the current values of all the tracked quantiles.
-   * @return snapshot of the tracked quantiles n * if no items have been added to the estimator
+   * @return snapshot of the tracked quantiles
+   * @throws IOException if no items have been added to the estimator
    */
   synchronized public Map<MetricQuantile, Long> snapshot() throws IOException {
     // flush the buffer first for best results
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 8b4890fa54a..b7d10444675 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -239,10 +239,10 @@ public class HttpServer implements FilterContainer {
     private int port = -1;
 
     /**
-     * Add an endpoint that the HTTP server should listen to. n * the endpoint of that the HTTP
-     * server should listen to. The scheme specifies the protocol (i.e. HTTP / HTTPS), the host
-     * specifies the binding address, and the port specifies the listening port. Unspecified or zero
-     * port means that the server can listen to any port.
+     * Add an endpoint that the HTTP server should listen to.
+     * @param endpoint the endpoint that the HTTP server should listen to. The scheme specifies the
+     *                 protocol (i.e. HTTP / HTTPS), the host specifies the binding address, and the
+     *                 port specifies the listening port. Unspecified or zero port means that the
+     *                 server can listen to any port.
      */
     public Builder addEndpoint(URI endpoint) {
       endpoints.add(endpoint);
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
index c8456a461bb..494a30c3e77 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -141,7 +141,7 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter {
 
   /**
    * The purpose of this function is to get the doAs parameter of a http request case insensitively
-   * n * @return doAs parameter if exists or null otherwise
+   * @return the doAs parameter if it exists, or null otherwise
    */
   public static String getDoasFromHeader(final HttpServletRequest request) {
     String doas = null;
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index c68250e8d94..d6dfbc2173d 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -142,8 +142,8 @@ public class JMXJsonServlet extends HttpServlet {
   }
 
   /**
-   * Process a GET request for the specified resource. n * The servlet request we are processing n *
-   * The servlet response we are creating
+   * Process a GET request for the specified resource.
+   * @param request  The servlet request we are processing
+   * @param response The servlet response we are creating
    */
   @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
index 760f4c0a2b0..1c5387fdaf8 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
@@ -109,7 +109,7 @@ public final class JSONMetricUtil {
    * Method for building map used for constructing ObjectName. Mapping is done with arrays indices
    * @param keys   Map keys
    * @param values Map values
-   * @return Map or null if arrays are empty * or have different number of elements
+   * @return Map or null if arrays are empty or have different number of elements
    */
   public static Hashtable<String, String> buldKeyValueTable(String[] keys, String[] values) {
     if (keys.length != values.length) {
diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java
index e517a5ffedb..c0bfbb4e56b 100644
--- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java
+++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java
@@ -120,7 +120,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
   }
 
   /**
-   * Test that the server is alive once started n * on failure
+   * Test that the server is alive once started
+   * @throws Throwable on failure
    */
   @Ignore("Hangs on occasion; see HBASE-14430")
   @Test
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 40fd27a27c9..556cd216d34 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -76,7 +76,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
   }
 
   /**
-   * Returns a ClusterStatus for this HBase cluster n
+   * Returns a ClusterMetrics for this HBase cluster
    */
   @Override
   public ClusterMetrics getClusterMetrics() throws IOException {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 89439101deb..a5d3d75555d 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -669,7 +669,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
   }
 
   /**
-   * After adding data to the table start a mr job to nnn
+   * After adding data to the table, start a mr job to run the check.
    */
   private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
     LOG.info("Running check");
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 798f6d66fac..4356f06a131 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -1147,8 +1147,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       }
 
       /**
-       * nn * @return Return new byte array that has <code>ordinal</code> as prefix on front taking
-       * up Bytes.SIZEOF_SHORT bytes followed by <code>r</code>
+       * Returns new byte array that has <code>ordinal</code> as prefix on front taking up
+       * Bytes.SIZEOF_SHORT bytes followed by <code>r</code>
        */
       public static byte[] addPrefixFlag(final int ordinal, final byte[] r) {
         byte[] prefix = Bytes.toBytes((short) ordinal);
@@ -1162,7 +1162,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       }
 
       /**
-       * n * @return Type from the Counts enum of this row. Reads prefix added by
+       * Returns type from the Counts enum of this row. Reads prefix added by
        * {@link #addPrefixFlag(int, byte[])}
        */
       public static Counts whichType(final byte[] bs) {
@@ -1170,9 +1170,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
         return Counts.values()[ordinal];
       }
 
-      /**
-       * n * @return Row bytes minus the type flag.
-       */
+      /** Returns Row bytes minus the type flag. */
       public static byte[] getRowOnly(BytesWritable bw) {
         byte[] bytes = new byte[bw.getLength() - Bytes.SIZEOF_SHORT];
         System.arraycopy(bw.getBytes(), Bytes.SIZEOF_SHORT, bytes, 0, bytes.length);
@@ -1261,7 +1259,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
       /**
        * Dump out extra info around references if there are any. Helps debugging.
-       * @return StringBuilder filled with references if any. n
+       * @return StringBuilder filled with references if any.
        */
       private StringBuilder dumpExtraInfoOnRefs(final BytesWritable key, final Context context,
         final List<byte[]> refs) throws IOException {
@@ -1412,8 +1410,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
     }
 
     /**
-     * Verify the values in the Counters against the expected number of entries written. n *
-     * Expected number of referenced entrires n * The Job's Counters object
+     * Verify the values in the Counters against the expected number of entries written.
+     * @param expectedReferenced Expected number of referenced entries
+     * @param counters The Job's Counters object
      * @return True if the values match what's expected, false otherwise
      */
     protected boolean verifyExpectedValues(long expectedReferenced, Counters counters) {
@@ -1441,7 +1439,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
     /**
      * Verify that the Counters don't contain values which indicate an outright failure from the
-     * Reducers. n * The Job's counters
+     * Reducers.
+     * @param counters The Job's counters
      * @return True if the "bad" counter objects are 0, false otherwise
      */
     protected boolean verifyUnexpectedValues(Counters counters) {
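
For readers piecing the hunks above back together: addPrefixFlag/whichType/getRowOnly implement a
tiny tagged-row encoding, a short ordinal in the first Bytes.SIZEOF_SHORT bytes followed by the row
itself. A minimal standalone sketch (the class name is illustrative; the real methods live in
IntegrationTestBigLinkedList):

    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixFlagSketch {
      /** Prepend a short ordinal (Bytes.SIZEOF_SHORT bytes) to the row bytes. */
      static byte[] addPrefixFlag(int ordinal, byte[] row) {
        byte[] prefix = Bytes.toBytes((short) ordinal);
        byte[] result = new byte[prefix.length + row.length];
        System.arraycopy(prefix, 0, result, 0, prefix.length);
        System.arraycopy(row, 0, result, prefix.length, row.length);
        return result;
      }

      /** Read the ordinal back out of the first two bytes. */
      static int whichType(byte[] bytes) {
        return Bytes.toShort(bytes, 0, Bytes.SIZEOF_SHORT);
      }

      /** Strip the prefix, returning the row bytes only. */
      static byte[] getRowOnly(byte[] bytes) {
        byte[] row = new byte[bytes.length - Bytes.SIZEOF_SHORT];
        System.arraycopy(bytes, Bytes.SIZEOF_SHORT, row, 0, row.length);
        return row;
      }
    }
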
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
index 740a22f0f4d..7cd7ed15342 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
@@ -171,7 +171,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
     /**
      * This tears down any tables that existed from before and rebuilds the tables and schemas on
      * the source cluster. It then sets up replication from the source to the sink cluster by using
-     * the {@link org.apache.hadoop.hbase.client.replication.ReplicationAdmin} connection. n
+     * the {@link org.apache.hadoop.hbase.client.replication.ReplicationAdmin} connection.
      */
     protected void setupTablesAndReplication() throws Exception {
       TableName tableName = getTableName(source.getConfiguration());
@@ -247,7 +247,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
 
     /**
      * Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator} in the
-     * source cluster. This assumes that the tables have been setup via setupTablesAndReplication. n
+     * source cluster. This assumes that the tables have been set up via setupTablesAndReplication.
      */
     protected void runGenerator() throws Exception {
       Path outputPath = new Path(outputDir);
@@ -268,7 +268,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
      * Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Verify} in the sink
      * cluster. If replication is working properly the data written at the source cluster should be
      * available in the sink cluster after a reasonable gap
-     * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster n
+     * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster
      */
     protected void runVerify(long expectedNumNodes) throws Exception {
       Path outputPath = new Path(outputDir);
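
The ReplicationAdmin wiring referred to in setupTablesAndReplication's javadoc boils down to
registering the sink cluster as a peer of the source. A rough sketch, assuming the two-argument
addPeer overload is available; the peer id and cluster key are invented:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class ReplicationSetupSketch {
      static void addSinkPeer(Configuration sourceConf) throws Exception {
        try (ReplicationAdmin admin = new ReplicationAdmin(sourceConf)) {
          ReplicationPeerConfig peer = new ReplicationPeerConfig();
          // Cluster key of the sink: zk quorum, client port, znode parent.
          peer.setClusterKey("sinkzk1,sinkzk2,sinkzk3:2181:/hbase");
          admin.addPeer("1", peer); // "1" is an arbitrary peer id
        }
      }
    }
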
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
index 60e24be5128..63dc0bb28c8 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
@@ -36,9 +36,6 @@ public class Driver {
     pgd = pgd0;
   }
 
-  /**
-   * nn
-   */
   public static void main(String[] args) throws Throwable {
     pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table");
     ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
index 3d609ffd73b..58d8f49839f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
@@ -74,7 +74,7 @@ public class GroupingTableMap extends MapReduceBase
 
   /**
    * Extract the grouping columns from value to construct a new key. Pass the new key and value to
-   * reduce. If any of the grouping columns are not found in the value, the record is skipped. nnnnn
+   * reduce. If any of the grouping columns are not found in the value, the record is skipped.
    */
   public void map(ImmutableBytesWritable key, Result value,
     OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter) throws IOException {
@@ -88,8 +88,8 @@ public class GroupingTableMap extends MapReduceBase
 
   /**
    * Extract columns values from the current record. This method returns null if any of the columns
-   * are not found. Override this method if you want to deal with nulls differently. n * @return
-   * array of byte values
+   * are not found. Override this method if you want to deal with nulls differently.
+   * @return array of byte values
    */
   protected byte[][] extractKeyValues(Result r) {
     byte[][] keyVals = null;
@@ -115,8 +115,8 @@ public class GroupingTableMap extends MapReduceBase
 
   /**
    * Create a key by concatenating multiple column values. Override this function in order to
-   * produce different types of keys. n * @return key generated by concatenating multiple column
-   * values
+   * produce different types of keys.
+   * @return key generated by concatenating multiple column values
    */
   protected ImmutableBytesWritable createGroupKey(byte[][] vals) {
     if (vals == null) {
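
To make the extractKeyValues/createGroupKey contract above concrete, here is a standalone sketch of
the concatenation step; the single-space delimiter is an assumption for illustration, not a claim
about the class's actual delimiter:

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GroupKeySketch {
      /** Join column values into one key; null input yields null, per the javadoc. */
      static ImmutableBytesWritable createGroupKey(byte[][] vals) {
        if (vals == null) {
          return null;
        }
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < vals.length; i++) {
          if (i > 0) {
            sb.append(' '); // assumed delimiter
          }
          sb.append(Bytes.toString(vals[i]));
        }
        return new ImmutableBytesWritable(Bytes.toBytes(sb.toString()));
      }
    }
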
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
index 16256942d72..8af0b4b4749 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
@@ -53,7 +53,7 @@ public class IdentityTableMap extends MapReduceBase
   }
 
   /**
-   * Pass the key, value to reduce nnnnn
+   * Pass the key, value to reduce
    */
   public void map(ImmutableBytesWritable key, Result value,
     OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
index 79d5f3dc8c0..29f9478da10 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
@@ -38,7 +38,7 @@ public class IdentityTableReduce extends MapReduceBase
   private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReduce.class.getName());
 
   /**
-   * No aggregation, output pairs of (key, record) nnnnn
+   * No aggregation, output pairs of (key, record)
    */
   public void reduce(ImmutableBytesWritable key, Iterator<Put> values,
     OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
index 24e9da0f28d..0e9f0deaf67 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
@@ -105,7 +105,6 @@ public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat
    * restoreDir. Sets:
    * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#RESTORE_DIRS_KEY},
    * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#SNAPSHOT_TO_SCANS_KEY}
-   * nnnn
    */
   public static void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans,
     Path restoreDir) throws IOException {
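
A caller of the setInput shown above supplies a snapshot-to-scans map plus a restore directory,
roughly like this (snapshot names and the restore path are invented):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat;

    public class MultiSnapshotSetupSketch {
      static void configure(Configuration conf) throws Exception {
        Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
        // One or more scans per snapshot; an empty Scan reads the whole snapshot.
        snapshotScans.put("snapshot_a", Arrays.asList(new Scan()));
        snapshotScans.put("snapshot_b", Arrays.asList(new Scan()));
        // Snapshots are restored under the given directory before the job reads them.
        MultiTableSnapshotInputFormat.setInput(conf, snapshotScans, new Path("/tmp/restore"));
      }
    }
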
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
index 4f95950589c..2f6324a7ac5 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
@@ -65,9 +65,7 @@ public class RowCounter extends Configured implements Tool {
     }
   }
 
-  /**
-   * n * @return the JobConf n
-   */
+  /** Returns the JobConf */
   public JobConf createSubmittableJob(String[] args) throws IOException {
     JobConf c = new JobConf(getConf(), getClass());
     c.setJobName(NAME);
@@ -104,9 +102,6 @@ public class RowCounter extends Configured implements Tool {
     return 0;
   }
 
-  /**
-   * nn
-   */
   public static void main(String[] args) throws Exception {
     int errCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(), args);
     System.exit(errCode);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index 34736bd6a3d..667629016d3 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -210,7 +210,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
   /**
    * Allows subclasses to initialize the table information.
    * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
-   * @param tableName  The {@link TableName} of the table to process. n
+   * @param tableName  The {@link TableName} of the table to process.
    */
   protected void initializeTable(Connection connection, TableName tableName) throws IOException {
     if (this.table != null || this.connection != null) {
@@ -240,7 +240,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
   }
 
   /**
-   * Allows subclasses to set the {@link TableRecordReader}. n * to provide other
+   * Allows subclasses to set the {@link TableRecordReader}.
+   * @param tableRecordReader to provide other
    * {@link TableRecordReader} implementations.
    */
   protected void setTableRecordReader(TableRecordReader tableRecordReader) {
@@ -248,7 +248,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
   }
 
   /**
-   * Allows subclasses to set the {@link Filter} to be used. n
+   * Allows subclasses to set the {@link Filter} to be used.
    */
   protected void setRowFilter(Filter rowFilter) {
     this.rowFilter = rowFilter;
@@ -272,7 +272,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
 
   /**
    * Close the Table and related objects that were initialized via
-   * {@link #initializeTable(Connection, TableName)}. n
+   * {@link #initializeTable(Connection, TableName)}.
    */
   protected void closeTable() throws IOException {
     close(table, connection);
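
The initializeTable/closeTable pair above frames the intended subclass lifecycle: open an unmanaged
Connection, hand it to initializeTable, and let closeTable release both later. A bare-bones sketch
(the class name and configuration property are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.mapred.TableInputFormatBase;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobConfigurable;

    public class ExampleTableInputFormat extends TableInputFormatBase implements JobConfigurable {
      @Override
      public void configure(JobConf job) {
        try {
          // Unmanaged connection; closeTable() will close it for us.
          Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
          initializeTable(conn, TableName.valueOf(job.get("example.input.table")));
        } catch (IOException e) {
          throw new RuntimeException("Failed to initialize table", e);
        }
      }
    }
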
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
index 270aeb186a4..0f217ad5801 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
@@ -105,7 +105,8 @@ public class TableOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
    * Failure to do so will drop writes.
    * @param ignored Ignored filesystem
    * @param job     Current JobConf
-   * @param name    Name of the job n * @return The newly created writer instance.
+   * @param name    Name of the job
+   * @return The newly created writer instance.
    * @throws IOException When creating the writer fails.
    */
   @Override
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
index e8765c44854..414403534a9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
@@ -34,14 +34,14 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
   private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl();
 
   /**
-   * Restart from survivable exceptions by creating a new scanner. nn
+   * Restart from survivable exceptions by creating a new scanner.
    */
   public void restart(byte[] firstRow) throws IOException {
     this.recordReaderImpl.restart(firstRow);
   }
 
   /**
-   * Build the scanner. Not done in constructor to allow for extension. n
+   * Build the scanner. Not done in constructor to allow for extension.
    */
   public void init() throws IOException {
     this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow());
@@ -82,26 +82,28 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
     this.recordReaderImpl.setRowFilter(rowFilter);
   }
 
+  @Override
   public void close() {
     this.recordReaderImpl.close();
   }
 
   /**
-   * n *
    * @see org.apache.hadoop.mapred.RecordReader#createKey()
    */
+  @Override
   public ImmutableBytesWritable createKey() {
     return this.recordReaderImpl.createKey();
   }
 
   /**
-   * n *
    * @see org.apache.hadoop.mapred.RecordReader#createValue()
    */
+  @Override
   public Result createValue() {
     return this.recordReaderImpl.createValue();
   }
 
+  @Override
   public long getPos() {
 
     // This should be the ordinal tuple in the range;
@@ -109,6 +111,7 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
     return this.recordReaderImpl.getPos();
   }
 
+  @Override
   public float getProgress() {
     // Depends on the total number of tuples and getPos
     return this.recordReaderImpl.getPos();
@@ -117,8 +120,9 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
   /**
    * @param key   HStoreKey as input key.
    * @param value MapWritable as input value
-   * @return true if there was more data n
+   * @return true if there was more data
    */
+  @Override
   public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
     return this.recordReaderImpl.next(key, value);
   }
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
index 4d2131467c0..2b0f6d4458c 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
@@ -153,7 +153,6 @@ public class TableRecordReaderImpl {
   }
 
   /**
-   * n *
    * @see org.apache.hadoop.mapred.RecordReader#createKey()
    */
   public ImmutableBytesWritable createKey() {
@@ -161,7 +160,6 @@ public class TableRecordReaderImpl {
   }
 
   /**
-   * n *
    * @see org.apache.hadoop.mapred.RecordReader#createValue()
    */
   public Result createValue() {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
index 2cb63ba7a6a..0bcb559ae3c 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
@@ -43,7 +43,7 @@ public class TableSplit implements InputSplit, Comparable<TableSplit> {
   }
 
   /**
-   * Constructor nnnn
+   * Constructor
    */
   public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) {
     this.m_tableName = tableName;
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
index a48ba49058a..8d12fe5d720 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
@@ -59,7 +59,7 @@ public class CellCreator {
    * @param value     column value
    * @param voffset   value offset
    * @param vlength   value length
-   * @return created Cell n
+   * @return created Cell
    */
   public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
     byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
@@ -83,7 +83,8 @@ public class CellCreator {
    * @param voffset       value offset
    * @param vlength       value length
    * @param visExpression visibility expression to be associated with cell
-   * @return created Cell n * @deprecated since 0.98.9
+   * @return created Cell
+   * @deprecated since 0.98.9
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-10560">HBASE-10560</a>
    */
   @Deprecated
@@ -111,7 +112,8 @@ public class CellCreator {
    * @param timestamp version timestamp
    * @param value     column value
    * @param voffset   value offset
-   * @param vlength   value length n * @return created Cell n
+   * @param vlength   value length
+   * @return created Cell
    */
   public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
     byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
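
Calling the offset/length-style create above is mechanical when the arrays are exact-fit: offset 0
and length = array length throughout. A sketch (row/family/qualifier values are invented, and the
CellCreator instance is assumed to come from the mapper's setup):

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.mapreduce.CellCreator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellCreatorSketch {
      static Cell makeCell(CellCreator creator) throws IOException {
        byte[] row = Bytes.toBytes("row1");
        byte[] fam = Bytes.toBytes("d");
        byte[] qual = Bytes.toBytes("c1");
        byte[] val = Bytes.toBytes("v");
        // Whole-array coordinates: offset 0, length = array length.
        return creator.create(row, 0, row.length, fam, 0, fam.length, qual, 0, qual.length,
          System.currentTimeMillis(), val, 0, val.length);
      }
    }
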
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index e09e7be98eb..ccbd826c91d 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -330,7 +330,7 @@ public class HashTable extends Configured implements Tool {
     }
 
     /**
-     * Open a TableHash.Reader starting at the first hash at or after the given key. n
+     * Open a TableHash.Reader starting at the first hash at or after the given key.
      */
     public Reader newReader(Configuration conf, ImmutableBytesWritable startKey)
       throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index fd30f8fa7ef..2c1a2ddae24 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -147,7 +147,7 @@ public class ImportTsv extends Configured implements Tool {
 
     /**
      * @param columnsSpecification the list of columns to parse out, comma separated. The row key
-     *                             should be the special token TsvParser.ROWKEY_COLUMN_SPEC n
+     *                             should be the special token TsvParser.ROWKEY_COLUMN_SPEC
      */
     public TsvParser(String columnsSpecification, String separatorStr) {
       // Configure separator
@@ -415,8 +415,8 @@ public class ImportTsv extends Configured implements Tool {
     }
 
     /**
-     * Return starting position and length of row key from the specified line bytes. nn * @return
-     * Pair of row key offset and length. n
+     * Return starting position and length of row key from the specified line bytes.
+     * @return Pair of row key offset and length.
      */
     public Pair<Integer, Integer> parseRowKey(byte[] lineBytes, int length)
       throws BadTsvLineException {
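
To illustrate the parser contract above: the column spec names one family:qualifier per input
column, with a row-key token marking which column is the key. A sketch, assuming ImportTsv.TsvParser
is accessible from the calling code and that the row-key token is the usual HBASE_ROW_KEY:

    import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Pair;

    public class TsvParserSketch {
      static void demo() throws Exception {
        // First column is the row key, then two columns in family "d".
        TsvParser parser = new TsvParser("HBASE_ROW_KEY,d:c1,d:c2", "\t");
        byte[] line = Bytes.toBytes("row1\tv1\tv2");
        Pair<Integer, Integer> rowKey = parser.parseRowKey(line, line.length);
        System.out.println("row key offset=" + rowKey.getFirst() + " length=" + rowKey.getSecond());
      }
    }
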
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
index fb42e332833..ef3179830f9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
@@ -72,7 +72,7 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 {
    * function will configure the requisite number of reducers to write HFiles for multiple tables
    * simultaneously
    * @param job                   See {@link org.apache.hadoop.mapreduce.Job}
-   * @param multiTableDescriptors Table descriptor and region locator pairs n
+   * @param multiTableDescriptors Table descriptor and region locator pairs
    */
   public static void configureIncrementalLoad(Job job, List<TableInfo> multiTableDescriptors)
     throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
index 5a5d1149755..35c12672dea 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
@@ -76,8 +76,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
     boolean useWriteAheadLogging;
 
     /**
-     * n * HBaseConfiguration to used n * whether to use write ahead logging. This can be turned off
-     * ( <tt>false</tt>) to improve performance when bulk loading data.
+     * @param conf the HBaseConfiguration to use
+     * @param useWriteAheadLogging whether to use write ahead logging. This can be turned off
+     * (<tt>false</tt>) to improve performance when bulk loading data.
      */
     public MultiTableRecordWriter(Configuration conf, boolean useWriteAheadLogging)
       throws IOException {
@@ -88,8 +88,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
     }
 
     /**
-     * n * the name of the table, as a string
-     * @return the named mutator n * if there is a problem opening a table
+     * @param tableName the name of the table, as a string
+     * @return the named mutator
+     * @throws IOException if there is a problem opening a table
      */
     BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException {
       if (this.connection == null) {
@@ -115,8 +115,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
     }
 
     /**
-     * Writes an action (Put or Delete) to the specified table. n * the table being updated. n * the
-     * update, either a put or a delete. n * if the action is not a put or a delete.
+     * Writes an action (Put or Delete) to the specified table.
+     * @param tableName the table being updated
+     * @param action the update, either a put or a delete
+     * @throws IOException if the action is not a put or a delete
      */
     @Override
     public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
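
Seen from a job, the write contract above means the reducer's output key names the destination
table and the value is the mutation to apply there. A minimal reducer sketch (table, family, and
qualifier names are invented):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class MultiTableReducerSketch
      extends Reducer<Text, Text, ImmutableBytesWritable, Mutation> {
      @Override
      protected void reduce(Text key, Iterable<Text> values, Context context)
          throws IOException, InterruptedException {
        Put put = new Put(Bytes.toBytes(key.toString()));
        put.addColumn(Bytes.toBytes("d"), Bytes.toBytes("c1"), Bytes.toBytes("v"));
        // The output key selects the table this Put is written to.
        context.write(new ImmutableBytesWritable(Bytes.toBytes("table_a")), put);
      }
    }
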
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
index 93dac05101c..7fdd68c3ad8 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
@@ -78,7 +78,7 @@ public class MultiTableSnapshotInputFormatImpl {
    * Return the list of splits extracted from the scans/snapshots pushed to conf by
    * {@link #setInput(Configuration, Map, Path)}
    * @param conf Configuration to determine splits from
-   * @return Return the list of splits extracted from the scans/snapshots pushed to conf n
+   * @return Return the list of splits extracted from the scans/snapshots pushed to conf
    */
   public List<TableSnapshotInputFormatImpl.InputSplit> getSplits(Configuration conf)
     throws IOException {
@@ -112,7 +112,7 @@ public class MultiTableSnapshotInputFormatImpl {
    * Retrieve the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration by
    * {@link #setSnapshotToScans(Configuration, Map)}
    * @param conf Configuration to extract name -&gt; list&lt;scan&gt; mappings from.
-   * @return the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration n
+   * @return the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration
    */
   public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException {
 
@@ -136,7 +136,7 @@ public class MultiTableSnapshotInputFormatImpl {
   }
 
   /**
-   * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) nnn
+   * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY})
    */
   public void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans)
     throws IOException {
@@ -161,7 +161,7 @@ public class MultiTableSnapshotInputFormatImpl {
    * Retrieve the directories into which snapshots have been restored from
    * ({@link #RESTORE_DIRS_KEY})
    * @param conf Configuration to extract restore directories from
-   * @return the directories into which snapshots have been restored from n
+   * @return the directories into which snapshots have been restored from
    */
   public Map<String, Path> getSnapshotDirs(Configuration conf) throws IOException {
     List<Map.Entry<String, String>> kvps = ConfigurationUtil.getKeyValues(conf, RESTORE_DIRS_KEY);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
index 6258399472d..6d163e82e8c 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
@@ -248,7 +248,7 @@ public class RowCounter extends AbstractHBaseTool {
    * Sets filter {@link FilterBase} to the {@link Scan} instance. If provided rowRangeList contains
    * more than one element, method sets filter which is instance of {@link MultiRowRangeFilter}.
    * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. If rowRangeList
-   * contains exactly one element, startRow and stopRow are set to the scan. nn
+   * contains exactly one element, startRow and stopRow are set to the scan.
    */
   private static void setScanFilter(Scan scan, List<MultiRowRangeFilter.RowRange> rowRangeList) {
     final int size = rowRangeList == null ? 0 : rowRangeList.size();
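
The rule stated in the javadoc above condenses to a few lines. The following is an illustrative
restatement under the same contract, not the class's actual body:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
    import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;

    public class ScanFilterSketch {
      static void setScanFilter(Scan scan, List<MultiRowRangeFilter.RowRange> ranges)
          throws IOException {
        int size = ranges == null ? 0 : ranges.size();
        if (size <= 1) {
          // Zero or one range: row counting only needs each row's first cell.
          scan.setFilter(new FirstKeyOnlyFilter());
        } else {
          scan.setFilter(new MultiRowRangeFilter(ranges));
        }
        if (size == 1) {
          MultiRowRangeFilter.RowRange range = ranges.get(0);
          scan.withStartRow(range.getStartRow());
          scan.withStopRow(range.getStopRow());
        }
      }
    }
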
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index da796e12738..b02517451bc 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -362,8 +362,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
    * @param split A TableSplit corresponding to a range of rowkeys
    * @param n     Number of ranges after splitting. Pass 1 means no split for the range Pass 2 if
    *              you want to split the range in two;
-   * @return A list of TableSplit, the size of the list is n
-   * @throws IllegalArgumentIOException throws IllegalArgumentIOException
+   * @return A list of TableSplit, the size of the list is {@code n}
    */
   protected List<InputSplit> createNInputSplitsUniform(InputSplit split, int n)
     throws IllegalArgumentIOException {
@@ -581,7 +580,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
   /**
    * Allows subclasses to initialize the table information.
    * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
-   * @param tableName  The {@link TableName} of the table to process. n
+   * @param tableName  The {@link TableName} of the table to process.
    */
   protected void initializeTable(Connection connection, TableName tableName) throws IOException {
     if (this.table != null || this.connection != null) {
@@ -642,7 +641,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
 
   /**
    * Close the Table and related objects that were initialized via
-   * {@link #initializeTable(Connection, TableName)}. n
+   * {@link #initializeTable(Connection, TableName)}.
    */
   protected void closeTable() throws IOException {
     close(admin, table, regionLocator, connection);
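
The uniform split that createNInputSplitsUniform's javadoc promises is, at heart, fixed-width
integer arithmetic over the key bytes. A self-contained illustration of the idea, assuming start
and stop keys padded to equal length (this is not the method's actual implementation):

    import java.math.BigInteger;
    import java.util.ArrayList;
    import java.util.List;

    public class UniformSplitSketch {
      /** Compute the n-1 interior split points dividing [start, stop) evenly. */
      static List<byte[]> splitPoints(byte[] start, byte[] stop, int n) {
        BigInteger lo = new BigInteger(1, start);
        BigInteger hi = new BigInteger(1, stop);
        BigInteger width = hi.subtract(lo).divide(BigInteger.valueOf(n));
        List<byte[]> points = new ArrayList<>();
        for (int i = 1; i < n; i++) {
          points.add(lo.add(width.multiply(BigInteger.valueOf(i))).toByteArray());
        }
        return points;
      }
    }
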
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index ddcb6d573af..076d282c0d0 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -848,7 +848,7 @@ public class TableMapReduceUtil {
    * @param my_class        the class to find.
    * @param fs              the FileSystem with which to qualify the returned path.
    * @param packagedClasses a map of class name to path.
-   * @return a jar file that contains the class. n
+   * @return a jar file that contains the class.
    */
   private static Path findOrCreateJar(Class<?> my_class, FileSystem fs,
     Map<String, String> packagedClasses) throws IOException {
@@ -897,7 +897,7 @@ public class TableMapReduceUtil {
    * that is not the first thing on the class path that has a class with the same name. Looks first
    * on the classpath and then in the <code>packagedClasses</code> map.
    * @param my_class the class to find.
-   * @return a jar file that contains the class, or null. n
+   * @return a jar file that contains the class, or null.
    */
   private static String findContainingJar(Class<?> my_class, Map<String, String> packagedClasses)
     throws IOException {
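
The jar lookup documented above follows the classic classloader trick: ask for the .class resource
and, when it comes back under a jar: URL, peel the archive path out of it. A simplified sketch
(no URL-decoding, unlike what a production version would need):

    import java.net.URL;
    import java.util.Enumeration;

    public class FindJarSketch {
      static String findContainingJar(Class<?> clazz) throws Exception {
        String path = clazz.getName().replace('.', '/') + ".class";
        Enumeration<URL> urls = clazz.getClassLoader().getResources(path);
        while (urls.hasMoreElements()) {
          URL url = urls.nextElement();
          if ("jar".equals(url.getProtocol())) {
            // The path looks like file:/path/to/foo.jar!/com/Example.class
            String s = url.getPath();
            return s.substring("file:".length(), s.indexOf('!'));
          }
        }
        return null;
      }
    }
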
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index e8316c5016f..17c6c0e4551 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -90,7 +90,8 @@ public class TableOutputFormat<KEY> extends OutputFormat<KEY, Mutation> implemen
     private BufferedMutator mutator;
 
     /**
-     * n *
+     * Creates a record writer for the table named by OUTPUT_TABLE in the configuration.
      */
     public TableRecordWriter() throws IOException {
       String tableName = conf.get(OUTPUT_TABLE);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
index a0df98796b4..6b22ad1bb0f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
@@ -70,7 +70,8 @@ public class TableRecordReader extends RecordReader<ImmutableBytesWritable, Resu
 
   /**
    * Returns the current key.
-   * @return The current key. n * @throws InterruptedException When the job is aborted.
+   * @return The current key.
+   * @throws InterruptedException When the job is aborted.
    * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey()
    */
   @Override
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
index 79dfe752be0..2fba0197858 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
@@ -92,7 +92,7 @@ public class TextSortReducer
    * Handles initializing this class with objects specific to it (i.e., the parser). Common
    * initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
    * subclass may choose to override this method and call <code>doSetup</code> as well before
-   * handling it's own custom params. n
+   * handling its own custom params.
    */
   @Override
   protected void setup(Context context) {
@@ -107,7 +107,7 @@ public class TextSortReducer
   }
 
   /**
-   * Handles common parameter initialization that a subclass might want to leverage. nn
+   * Handles common parameter initialization that a subclass might want to leverage.
    */
   protected void doSetup(Context context, Configuration conf) {
     // If a custom separator has been used,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index 04c7e87d3b4..fe3077fcf22 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -93,7 +93,7 @@ public class TsvImporterMapper extends Mapper<LongWritable, Text, ImmutableBytes
    * Handles initializing this class with objects specific to it (i.e., the parser). Common
    * initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
    * subclass may choose to override this method and call <code>doSetup</code> as well before
-   * handling it's own custom params. n
+   * handling its own custom params.
    */
   @Override
   protected void setup(Context context) {
@@ -109,7 +109,7 @@ public class TsvImporterMapper extends Mapper<LongWritable, Text, ImmutableBytes
   }
 
   /**
-   * Handles common parameter initialization that a subclass might want to leverage. n
+   * Handles common parameter initialization that a subclass might want to leverage.
    */
   protected void doSetup(Context context) {
     Configuration conf = context.getConfiguration();
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
index 3ee760af74d..87ef096ad9e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
@@ -61,7 +61,7 @@ public class TsvImporterTextMapper
    * Handles initializing this class with objects specific to it (i.e., the parser). Common
    * initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
    * subclass may choose to override this method and call <code>doSetup</code> as well before
-   * handling it's own custom params. n
+   * handling its own custom params.
    */
   @Override
   protected void setup(Context context) {
@@ -76,7 +76,7 @@ public class TsvImporterTextMapper
   }
 
   /**
-   * Handles common parameter initialization that a subclass might want to leverage. n
+   * Handles common parameter initialization that a subclass might want to leverage.
    */
   protected void doSetup(Context context) {
     Configuration conf = context.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index ae749f1e870..6a9db659e3d 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -264,7 +264,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   interface Status {
     /**
      * Sets status
-     * @param msg status message n
+     * @param msg status message
      */
     void setStatus(final String msg) throws IOException;
   }
@@ -542,7 +542,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   /*
    * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write
    * out an input file with instruction per client regards which row they are to start on.
-   * @param cmd Command to run. n
+   * @param cmd Command to run.
    */
   static Job doMapReduce(TestOptions opts, final Configuration conf)
     throws IOException, InterruptedException, ClassNotFoundException {
@@ -593,7 +593,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   /*
    * Write input file of offsets-per-client for the mapreduce job.
    * @param c Configuration
-   * @return Directory that contains file written whose name is JOB_INPUT_FILENAME n
+   * @return Directory that contains file written whose name is JOB_INPUT_FILENAME
    */
   static Path writeInputFile(final Configuration c, final TestOptions opts) throws IOException {
     return writeInputFile(c, opts, new Path("."));
@@ -1355,7 +1355,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     /*
      * Run test
-     * @return Elapsed time. n
+     * @return Elapsed time.
      */
     long test() throws IOException, InterruptedException {
       testSetup();
@@ -2447,8 +2447,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   /*
-   * Format passed integer. n * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version
-   * of passed number (Does absolute in case number is negative).
+   * Format passed integer.
+   * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed number (Does
+   * absolute in case number is negative).
    */
   public static byte[] format(final int number) {
     byte[] b = new byte[ROW_LENGTH];
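
The format contract above, a zero-prefixed fixed-width decimal of the absolute value, can be
reproduced in one line; the ROW_LENGTH of 10 below is an assumed width for the demo, not the
class's constant:

    public class FormatSketch {
      static final int ROW_LENGTH = 10; // assumed width for illustration

      static byte[] format(int number) {
        // Zero-prefixed, fixed-width decimal; absolute value guards negatives.
        return String.format("%0" + ROW_LENGTH + "d", Math.abs(number)).getBytes();
      }

      public static void main(String[] args) {
        System.out.println(new String(format(-1234))); // prints 0000001234
      }
    }
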
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
index f9891067e6b..40a785abfb8 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
@@ -110,15 +110,15 @@ public class TestTableInputFormat {
   /**
    * Setup a table with two rows and values.
    * @param tableName the name of the table to create
-   * @return A Table instance for the created table. n
+   * @return A Table instance for the created table.
    */
   public static Table createTable(byte[] tableName) throws IOException {
     return createTable(tableName, new byte[][] { FAMILY });
   }
 
   /**
-   * Setup a table with two rows and values per column family. n * @return A Table instance for the
-   * created table. n
+   * Setup a table with two rows and values per column family.
+   * @return A Table instance for the created table.
    */
   public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
     Table table = UTIL.createTable(TableName.valueOf(tableName), families);
@@ -153,7 +153,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. nn
+   * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API.
    */
   static void runTestMapred(Table table) throws IOException {
     org.apache.hadoop.hbase.mapred.TableRecordReader trr =
@@ -181,7 +181,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create a table that IOE's on first scanner next call n
+   * Create a table that IOE's on first scanner next call
    */
   static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException {
     // build up a mock scanner stuff to fail the first time
@@ -212,7 +212,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create a table that throws a DoNoRetryIOException on first scanner next call n
+   * Create a table that throws a DoNotRetryIOException on first scanner next call
    */
   static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
     // build up a mock scanner stuff to fail the first time
@@ -245,7 +245,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming no errors using mapred api. n
+   * Run test assuming no errors using mapred api.
    */
   @Test
   public void testTableRecordReader() throws IOException {
@@ -254,7 +254,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming Scanner IOException failure using mapred api, n
+   * Run test assuming Scanner IOException failure using mapred api.
    */
   @Test
   public void testTableRecordReaderScannerFail() throws IOException {
@@ -263,7 +263,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming Scanner IOException failure using mapred api, n
+   * Run test assuming Scanner IOException failure using mapred api.
    */
   @Test(expected = IOException.class)
   public void testTableRecordReaderScannerFailTwice() throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
index 285b3339373..a9d9c4974f2 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
@@ -200,7 +200,7 @@ public abstract class MultiTableInputFormatTestBase {
   }
 
   /**
-   * Tests a MR scan using specific start and stop rows. nnn
+   * Tests a MR scan using specific start and stop rows.
    */
   private void testScan(String start, String stop, String last)
     throws IOException, InterruptedException, ClassNotFoundException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
index 2e2d7d1f167..40de2387388 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
@@ -776,7 +776,7 @@ public class TestCellBasedHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#configureCompression(Configuration, HTableDescriptor)} and
    * {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the
-   * compression map is correctly serialized into and deserialized from configuration n
+   * compression map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -839,7 +839,7 @@ public class TestCellBasedHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#configureBloomType(HTableDescriptor, Configuration)} and
    * {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the bloom type
-   * map is correctly serialized into and deserialized from configuration n
+   * map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -899,7 +899,7 @@ public class TestCellBasedHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#configureBlockSize(HTableDescriptor, Configuration)} and
    * {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the block size
-   * map is correctly serialized into and deserialized from configuration n
+   * map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -962,7 +962,7 @@ public class TestCellBasedHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTableDescriptor, Configuration)}
    * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that the
-   * compression map is correctly serialized into and deserialized from configuration n
+   * data block encoding map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java
index cfa961327ed..34323e46940 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java
@@ -153,8 +153,8 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Runs an export job with the specified command line args n * @return true if job completed
-   * successfully nnn
+   * Runs an export job with the specified command line args
+   * @return true if job completed successfully
    */
   protected boolean runExport(String[] args) throws Throwable {
     // need to make a copy of the configuration because to make sure different temp dirs are used.
@@ -167,8 +167,8 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Runs an import job with the specified command line args n * @return true if job completed
-   * successfully nnn
+   * Runs an import job with the specified command line args
+   * @return true if job completed successfully
    */
   boolean runImport(String[] args) throws Throwable {
     // need to make a copy of the configuration because to make sure different temp dirs are used.
@@ -177,7 +177,7 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Test simple replication case with column mapping n
+   * Test simple replication case with column mapping
    */
   @Test
   public void testSimpleCase() throws Throwable {
@@ -229,7 +229,7 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Test export hbase:meta table n
+   * Test export hbase:meta table
    */
   @Test
   public void testMetaExport() throws Throwable {
@@ -239,7 +239,7 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Test import data from 0.94 exported file n
+   * Test import data from 0.94 exported file
    */
   @Test
   public void testImport94Table() throws Throwable {
@@ -488,7 +488,7 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Count the number of keyvalues in the specified table for the given timerange nnn
+   * Count the number of keyvalues in the specified table for the given timerange
    */
   private int getCount(Table table, Filter filter) throws IOException {
     Scan scan = new Scan();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java
index 4bbacf04210..283acbabf6e 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java
@@ -107,7 +107,7 @@ public class TestCellBasedWALPlayer2 {
   }
 
   /**
-   * Simple end-to-end test n
+   * Simple end-to-end test
    */
   @Test
   public void testWALPlayer() throws Exception {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
index efe5abb3a5a..b1bb14bb712 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
@@ -179,7 +179,7 @@ public class TestCopyTable {
   }
 
   /**
-   * Simple end-to-end test n
+   * Simple end-to-end test
    */
   @Test
   public void testCopyTable() throws Exception {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 52cf4849f83..534b552a581 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -810,7 +810,7 @@ public class TestHFileOutputFormat2 {
 
   /**
    * Test for {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the
-   * family compression map is correctly serialized into and deserialized from configuration n
+   * family compression map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -872,7 +872,7 @@ public class TestHFileOutputFormat2 {
 
   /**
    * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the
-   * family bloom type map is correctly serialized into and deserialized from configuration n
+   * family bloom type map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -931,7 +931,7 @@ public class TestHFileOutputFormat2 {
 
   /**
    * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the
-   * family block size map is correctly serialized into and deserialized from configuration n
+   * family block size map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -994,7 +994,7 @@ public class TestHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that
    * the family data block encoding map is correctly serialized into and deserialized from
-   * configuration n
+   * configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 305695c83c0..81c847b2ab8 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -172,8 +172,8 @@ public class TestImportExport {
   }
 
   /**
-   * Runs an export job with the specified command line args n * @return true if job completed
-   * successfully nnn
+   * Runs an export job with the specified command line args
+   * @return true if job completed successfully
    */
   protected boolean runExport(String[] args) throws Throwable {
     // need to make a copy of the configuration because to make sure different temp dirs are used.
@@ -186,8 +186,8 @@ public class TestImportExport {
   }
 
   /**
-   * Runs an import job with the specified command line args n * @return true if job completed
-   * successfully nnn
+   * Runs an import job with the specified command line args
+   * @return true if job completed successfully
    */
   boolean runImport(String[] args) throws Throwable {
     // need to make a copy of the configuration because to make sure different temp dirs are used.
@@ -196,7 +196,7 @@ public class TestImportExport {
   }
 
   /**
-   * Test simple replication case with column mapping n
+   * Test simple replication case with column mapping
    */
   @Test
   public void testSimpleCase() throws Throwable {
@@ -248,7 +248,7 @@ public class TestImportExport {
   }
 
   /**
-   * Test export hbase:meta table n
+   * Test export hbase:meta table
    */
   @Test
   public void testMetaExport() throws Throwable {
@@ -258,7 +258,7 @@ public class TestImportExport {
   }
 
   /**
-   * Test import data from 0.94 exported file n
... 13419 lines suppressed ...