Posted to commits@hbase.apache.org by zh...@apache.org on 2022/10/06 14:50:49 UTC

[hbase] branch branch-2 updated: HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new 5cab4be0751 HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)
5cab4be0751 is described below

commit 5cab4be07510241104a273fbf89c9de50272dd4f
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Oct 6 18:17:34 2022 +0800

    HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    (cherry picked from commit 63cdd026f08cdde6ac0fde1342ffd050e8e02441)
    
    Conflicts:
            hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
            hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
            hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
            hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
            hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
            hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
            hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
            hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
            hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
            hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java
            hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
---
 .../FanOutOneBlockAsyncDFSOutputSaslHelper.java    |   2 +-
 .../java/org/apache/hadoop/hbase/ClusterId.java    |   7 +-
 .../org/apache/hadoop/hbase/ClusterStatus.java     |   2 +-
 .../org/apache/hadoop/hbase/HColumnDescriptor.java |  10 +-
 .../java/org/apache/hadoop/hbase/HRegionInfo.java  |  65 ++++----
 .../org/apache/hadoop/hbase/HRegionLocation.java   |   6 +-
 .../org/apache/hadoop/hbase/HTableDescriptor.java  |  12 +-
 .../hbase/NotAllMetaRegionsOnlineException.java    |   3 +-
 .../java/org/apache/hadoop/hbase/ServerLoad.java   |   2 +-
 .../org/apache/hadoop/hbase/ServerMetrics.java     |   2 +-
 .../java/org/apache/hadoop/hbase/client/Admin.java |  27 ++--
 .../org/apache/hadoop/hbase/client/Append.java     |  19 +--
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |  12 +-
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |   2 +-
 .../apache/hadoop/hbase/client/AsyncProcess.java   |   2 +-
 .../hadoop/hbase/client/ClusterConnection.java     |   6 +-
 .../hbase/client/ColumnFamilyDescriptor.java       |   2 +-
 .../client/ColumnFamilyDescriptorBuilder.java      |   4 +-
 .../hbase/client/ConnectionImplementation.java     |   2 +-
 .../org/apache/hadoop/hbase/client/Delete.java     |   9 +-
 .../java/org/apache/hadoop/hbase/client/Get.java   |  19 +--
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |   6 +-
 .../hadoop/hbase/client/HTableMultiplexer.java     |   7 +-
 .../hbase/client/ImmutableHColumnDescriptor.java   |   2 +-
 .../hadoop/hbase/client/ImmutableHRegionInfo.java  |   2 +-
 .../hbase/client/ImmutableHTableDescriptor.java    |   2 +-
 .../org/apache/hadoop/hbase/client/Increment.java  |  10 +-
 .../apache/hadoop/hbase/client/MasterCallable.java |   2 +-
 .../org/apache/hadoop/hbase/client/MetaCache.java  |   6 +-
 .../hadoop/hbase/client/MutableRegionInfo.java     |   6 +-
 .../org/apache/hadoop/hbase/client/Mutation.java   |  36 ++---
 .../org/apache/hadoop/hbase/client/Operation.java  |   2 +-
 .../hbase/client/OperationWithAttributes.java      |   2 +-
 .../client/PreemptiveFastFailInterceptor.java      |   9 +-
 .../java/org/apache/hadoop/hbase/client/Put.java   |  13 +-
 .../java/org/apache/hadoop/hbase/client/Query.java |   9 +-
 .../hbase/client/RegionAdminServiceCallable.java   |   2 +-
 .../hadoop/hbase/client/RegionInfoDisplay.java     |  10 +-
 .../hadoop/hbase/client/RegionReplicaUtil.java     |   2 +-
 .../hadoop/hbase/client/RegionServerCallable.java  |   2 +-
 .../org/apache/hadoop/hbase/client/Result.java     |  17 ++-
 .../hbase/client/RetryingCallerInterceptor.java    |   8 +-
 .../client/RetryingCallerInterceptorContext.java   |  10 +-
 .../apache/hadoop/hbase/client/RowMutations.java   |   4 +-
 .../java/org/apache/hadoop/hbase/client/Scan.java  | 103 +++++++------
 .../hadoop/hbase/client/SecureBulkLoadClient.java  |   8 +-
 .../java/org/apache/hadoop/hbase/client/Table.java |  19 +--
 .../hbase/client/TableDescriptorBuilder.java       |  13 +-
 .../org/apache/hadoop/hbase/client/TableState.java |  10 +-
 .../hbase/client/UnmodifyableHRegionInfo.java      |   2 +-
 .../hbase/client/backoff/ServerStatistics.java     |   2 +-
 .../client/metrics/ServerSideScanMetrics.java      |  18 +--
 .../hbase/client/replication/ReplicationAdmin.java |  17 ++-
 .../hbase/coprocessor/ColumnInterpreter.java       |  43 +++---
 .../hbase/coprocessor/CoprocessorException.java    |   2 +-
 .../hbase/exceptions/ClientExceptionsUtil.java     |   2 +-
 .../exceptions/FailedSanityCheckException.java     |   6 +-
 .../hadoop/hbase/filter/ColumnValueFilter.java     |   4 +-
 .../apache/hadoop/hbase/filter/CompareFilter.java  |   3 +-
 .../org/apache/hadoop/hbase/filter/Filter.java     |   5 +-
 .../org/apache/hadoop/hbase/filter/FilterBase.java |   6 +-
 .../org/apache/hadoop/hbase/filter/FilterList.java |   6 +-
 .../apache/hadoop/hbase/filter/FilterListBase.java |   2 +-
 .../apache/hadoop/hbase/filter/FuzzyRowFilter.java |   7 +-
 .../hadoop/hbase/filter/RandomRowFilter.java       |   4 +-
 .../filter/SingleColumnValueExcludeFilter.java     |   8 +-
 .../hbase/filter/SingleColumnValueFilter.java      |  13 +-
 .../hadoop/hbase/filter/TimestampsFilter.java      |   2 +-
 .../apache/hadoop/hbase/ipc/CellBlockBuilder.java  |   7 +-
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |  52 +++----
 .../hadoop/hbase/regionserver/LeaseException.java  |   3 +-
 .../regionserver/wal/FailedLogCloseException.java  |   3 +-
 .../wal/FailedSyncBeforeLogCloseException.java     |   3 +-
 .../hbase/security/AbstractHBaseSaslRpcClient.java |   4 +-
 .../hadoop/hbase/security/EncryptionUtil.java      |   4 +-
 .../hadoop/hbase/security/HBaseSaslRpcClient.java  |   6 +-
 .../hbase/security/access/AccessControlClient.java |  12 +-
 .../hbase/security/access/AccessControlUtil.java   |  39 +++--
 .../security/visibility/VisibilityClient.java      |  32 ++--
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 136 ++++++++---------
 .../hbase/shaded/protobuf/RequestConverter.java    | 108 +++++++++-----
 .../hbase/shaded/protobuf/ResponseConverter.java   |  22 +--
 .../org/apache/hadoop/hbase/util/Writables.java    |   4 +-
 .../apache/hadoop/hbase/zookeeper/ZNodePaths.java  |   4 +-
 .../hadoop/hbase/client/TestClientNoCluster.java   |  23 ++-
 .../hadoop/hbase/client/TestDeleteTimeStamp.java   |   2 +-
 .../java/org/apache/hadoop/hbase/AuthUtil.java     |   6 +-
 .../hadoop/hbase/ByteBufferKeyOnlyKeyValue.java    |   2 +-
 .../java/org/apache/hadoop/hbase/CellBuilder.java  |   2 +-
 .../apache/hadoop/hbase/CellComparatorImpl.java    |   5 +-
 .../java/org/apache/hadoop/hbase/CellUtil.java     |  14 +-
 .../apache/hadoop/hbase/CompoundConfiguration.java |   2 +-
 .../java/org/apache/hadoop/hbase/ExtendedCell.java |   2 +-
 .../apache/hadoop/hbase/HBaseConfiguration.java    |   2 +-
 .../java/org/apache/hadoop/hbase/KeyValue.java     |   4 +-
 .../org/apache/hadoop/hbase/KeyValueTestUtil.java  |   4 +-
 .../java/org/apache/hadoop/hbase/KeyValueUtil.java |  63 ++++----
 .../org/apache/hadoop/hbase/PrivateCellUtil.java   |  72 +++++----
 .../org/apache/hadoop/hbase/codec/BaseDecoder.java |   2 +-
 .../org/apache/hadoop/hbase/codec/CellCodec.java   |   2 +-
 .../hadoop/hbase/codec/CellCodecWithTags.java      |   2 +-
 .../hadoop/hbase/io/ByteBufferOutputStream.java    |   2 +-
 .../hbase/io/ByteBufferWriterOutputStream.java     |   4 +-
 .../apache/hadoop/hbase/io/CellOutputStream.java   |   4 +-
 .../hadoop/hbase/io/ImmutableBytesWritable.java    |   5 +-
 .../hadoop/hbase/io/TagCompressionContext.java     |   8 +-
 .../org/apache/hadoop/hbase/io/crypto/Cipher.java  |   8 +-
 .../apache/hadoop/hbase/io/crypto/Decryptor.java   |   6 +-
 .../apache/hadoop/hbase/io/crypto/Encryption.java  |  22 +--
 .../apache/hadoop/hbase/io/crypto/Encryptor.java   |   6 +-
 .../apache/hadoop/hbase/io/crypto/KeyProvider.java |   6 +-
 .../hadoop/hbase/io/encoding/DataBlockEncoder.java |   7 +-
 .../hbase/io/encoding/DataBlockEncoding.java       |   2 +-
 .../hadoop/hbase/io/encoding/EncodedDataBlock.java |   2 +-
 .../io/encoding/HFileBlockDecodingContext.java     |   6 +-
 .../hbase/io/encoding/IndexBlockEncoding.java      |   2 +-
 .../io/hadoopbackport/ThrottledInputStream.java    |   3 +-
 .../apache/hadoop/hbase/io/util/Dictionary.java    |   4 +-
 .../apache/hadoop/hbase/io/util/StreamUtils.java   |   4 +-
 .../java/org/apache/hadoop/hbase/nio/ByteBuff.java |  21 ++-
 .../org/apache/hadoop/hbase/nio/MultiByteBuff.java |  36 ++---
 .../org/apache/hadoop/hbase/security/User.java     |   2 +-
 .../apache/hadoop/hbase/security/UserProvider.java |   2 +-
 .../hbase/util/AbstractPositionedByteRange.java    |   4 +-
 .../apache/hadoop/hbase/util/ByteBufferUtils.java  |   4 +-
 .../org/apache/hadoop/hbase/util/ByteRange.java    |  11 +-
 .../java/org/apache/hadoop/hbase/util/Bytes.java   |  17 ++-
 .../org/apache/hadoop/hbase/util/ChecksumType.java |   8 +-
 .../java/org/apache/hadoop/hbase/util/Classes.java |   7 +-
 .../apache/hadoop/hbase/util/CommonFSUtils.java    |   2 +-
 .../hadoop/hbase/util/CoprocessorClassLoader.java  |   2 +-
 .../org/apache/hadoop/hbase/util/KeyLocker.java    |   2 +-
 .../java/org/apache/hadoop/hbase/util/MD5Hash.java |   8 +-
 .../java/org/apache/hadoop/hbase/util/Pair.java    |   4 +-
 .../apache/hadoop/hbase/util/PairOfSameType.java   |   4 +-
 .../hadoop/hbase/util/PositionedByteRange.java     |   4 +-
 .../apache/hadoop/hbase/util/PrettyPrinter.java    |   9 +-
 .../hadoop/hbase/util/SimpleMutableByteRange.java  |   8 +-
 .../util/SimplePositionedMutableByteRange.java     |  14 +-
 .../apache/hadoop/hbase/util/TimeMeasurable.java   |   2 +-
 .../org/apache/hadoop/hbase/util/UnsafeAccess.java |   2 +-
 .../hadoop/hbase/util/WindowMovingAverage.java     |   2 +-
 .../apache/hadoop/hbase/zookeeper/ZKConfig.java    |   8 +-
 .../hadoop/hbase/TestHBaseConfiguration.java       |  10 +-
 .../hbase/io/crypto/tls/X509TestContext.java       |   2 +-
 .../hbase/io/crypto/tls/X509TestHelpers.java       |   4 +-
 .../hadoop/hbase/util/RandomDistribution.java      |  15 +-
 .../regionserver/MetricsRegionServerSource.java    |   2 +-
 .../hadoop/hbase/rest/MetricsRESTSource.java       |   2 +-
 .../hbase/thrift/MetricsThriftServerSource.java    |   2 +-
 .../apache/hadoop/metrics2/MetricHistogram.java    |   2 +-
 .../metrics2/util/MetricSampleQuantiles.java       |   4 +-
 .../org/apache/hadoop/hbase/http/HttpServer.java   |   8 +-
 .../hbase/http/ProxyUserAuthenticationFilter.java  |   2 +-
 .../hadoop/hbase/http/jmx/JMXJsonServlet.java      |   4 +-
 .../apache/hadoop/hbase/util/JSONMetricUtil.java   |   2 +-
 .../hadoop/hbase/DistributedHBaseCluster.java      |   2 +-
 .../hbase/mapreduce/IntegrationTestBulkLoad.java   |   2 +-
 .../hbase/test/IntegrationTestBigLinkedList.java   |  18 +--
 .../hbase/test/IntegrationTestReplication.java     |   6 +-
 .../org/apache/hadoop/hbase/mapred/Driver.java     |   3 -
 .../hadoop/hbase/mapred/GroupingTableMap.java      |  10 +-
 .../hadoop/hbase/mapred/IdentityTableMap.java      |   2 +-
 .../hadoop/hbase/mapred/IdentityTableReduce.java   |   2 +-
 .../mapred/MultiTableSnapshotInputFormat.java      |   1 -
 .../org/apache/hadoop/hbase/mapred/RowCounter.java |   7 +-
 .../hadoop/hbase/mapred/TableInputFormatBase.java  |   8 +-
 .../hadoop/hbase/mapred/TableOutputFormat.java     |   3 +-
 .../hadoop/hbase/mapred/TableRecordReader.java     |  14 +-
 .../hadoop/hbase/mapred/TableRecordReaderImpl.java |   2 -
 .../org/apache/hadoop/hbase/mapred/TableSplit.java |   2 +-
 .../apache/hadoop/hbase/mapreduce/CellCreator.java |   8 +-
 .../apache/hadoop/hbase/mapreduce/HashTable.java   |   2 +-
 .../apache/hadoop/hbase/mapreduce/ImportTsv.java   |   6 +-
 .../mapreduce/MultiTableHFileOutputFormat.java     |   2 +-
 .../hbase/mapreduce/MultiTableOutputFormat.java    |  12 +-
 .../MultiTableSnapshotInputFormatImpl.java         |   8 +-
 .../apache/hadoop/hbase/mapreduce/RowCounter.java  |   2 +-
 .../hbase/mapreduce/TableInputFormatBase.java      |   7 +-
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java |   4 +-
 .../hadoop/hbase/mapreduce/TableOutputFormat.java  |   3 +-
 .../hadoop/hbase/mapreduce/TableRecordReader.java  |   3 +-
 .../hadoop/hbase/mapreduce/TextSortReducer.java    |   4 +-
 .../hadoop/hbase/mapreduce/TsvImporterMapper.java  |   4 +-
 .../hbase/mapreduce/TsvImporterTextMapper.java     |   4 +-
 .../apache/hadoop/hbase/PerformanceEvaluation.java |  13 +-
 .../hadoop/hbase/mapred/TestTableInputFormat.java  |  18 +--
 .../mapreduce/MultiTableInputFormatTestBase.java   |   2 +-
 .../mapreduce/TestCellBasedHFileOutputFormat2.java |   8 +-
 .../mapreduce/TestCellBasedImportExport2.java      |  16 +-
 .../hbase/mapreduce/TestCellBasedWALPlayer2.java   |   2 +-
 .../hadoop/hbase/mapreduce/TestCopyTable.java      |   2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    |   8 +-
 .../hadoop/hbase/mapreduce/TestImportExport.java   |  16 +-
 .../TestImportTSVWithOperationAttributes.java      |   8 +-
 .../TestImportTSVWithVisibilityLabels.java         |   6 +-
 .../hadoop/hbase/mapreduce/TestImportTsv.java      |   2 +-
 .../mapreduce/TestMultiTableInputFormatBase.java   |   2 +-
 .../mapreduce/TestMultithreadedTableMapper.java    |   7 +-
 .../hadoop/hbase/mapreduce/TestRowCounter.java     |  26 ++--
 .../hbase/mapreduce/TestTableInputFormat.java      |  25 ++--
 .../hadoop/hbase/mapreduce/TestTableMapReduce.java |   4 +-
 .../hbase/mapreduce/TestTableMapReduceBase.java    |   5 +-
 .../store/wal/ProcedureWALPrettyPrinter.java       |   3 +-
 .../hadoop/hbase/replication/ReplicationPeers.java |   2 +-
 .../apache/hadoop/hbase/rest/ExistsResource.java   |   2 +-
 .../apache/hadoop/hbase/rest/MultiRowResource.java |   2 +-
 .../hbase/rest/NamespacesInstanceResource.java     |   4 +-
 .../hadoop/hbase/rest/NamespacesResource.java      |   2 +-
 .../hadoop/hbase/rest/ProtobufMessageHandler.java  |   2 +-
 .../org/apache/hadoop/hbase/rest/RESTServlet.java  |   2 +-
 .../apache/hadoop/hbase/rest/RegionsResource.java  |   2 +-
 .../org/apache/hadoop/hbase/rest/RootResource.java |   2 +-
 .../org/apache/hadoop/hbase/rest/RowResource.java  |   2 +-
 .../apache/hadoop/hbase/rest/ScannerResource.java  |   2 +-
 .../apache/hadoop/hbase/rest/SchemaResource.java   |   2 +-
 .../hbase/rest/StorageClusterStatusResource.java   |   2 +-
 .../hbase/rest/StorageClusterVersionResource.java  |   2 +-
 .../apache/hadoop/hbase/rest/TableResource.java    |   2 +-
 .../apache/hadoop/hbase/rest/VersionResource.java  |   2 +-
 .../apache/hadoop/hbase/rest/client/Client.java    |  42 +++---
 .../rest/filter/RestCsrfPreventionFilter.java      |   2 +-
 .../apache/hadoop/hbase/rest/model/CellModel.java  |  10 +-
 .../hbase/rest/model/NamespacesInstanceModel.java  |   4 +-
 .../hadoop/hbase/rest/model/NamespacesModel.java   |   2 +-
 .../hadoop/hbase/rest/model/ScannerModel.java      |   6 +-
 .../hadoop/hbase/rest/model/TableInfoModel.java    |   2 +-
 .../apache/hadoop/hbase/rest/model/TableModel.java |   2 +-
 .../hadoop/hbase/rest/client/RemoteAdmin.java      |  16 +-
 .../hadoop/hbase/rest/client/TestRemoteTable.java  |  41 +++++
 .../java/org/apache/hadoop/hbase/HealthReport.java |   4 +-
 .../org/apache/hadoop/hbase/LocalHBaseCluster.java |  14 +-
 .../apache/hadoop/hbase/RegionStateListener.java   |   4 +-
 .../java/org/apache/hadoop/hbase/SplitLogTask.java |   4 +-
 .../apache/hadoop/hbase/backup/HFileArchiver.java  |   7 +-
 .../hadoop/hbase/client/VersionInfoUtil.java       |   3 +-
 .../hadoop/hbase/constraint/Constraints.java       |  85 +++++------
 .../coordination/SplitLogManagerCoordination.java  |   2 +-
 .../ZKSplitLogManagerCoordination.java             |   4 +-
 .../coordination/ZkSplitLogWorkerCoordination.java |   4 +-
 .../hadoop/hbase/coprocessor/CoprocessorHost.java  |   2 +-
 .../hadoop/hbase/coprocessor/MasterObserver.java   |  49 +++---
 .../hadoop/hbase/coprocessor/RegionObserver.java   |  18 +--
 .../hbase/errorhandling/ForeignException.java      |   8 +-
 .../hbase/errorhandling/ForeignExceptionSnare.java |   2 +-
 .../apache/hadoop/hbase/executor/EventHandler.java |   2 +-
 .../hadoop/hbase/executor/ExecutorService.java     |   2 +-
 .../hbase/favored/FavoredNodeAssignmentHelper.java |   8 +-
 .../hadoop/hbase/favored/FavoredNodesPlan.java     |   2 +-
 .../java/org/apache/hadoop/hbase/io/Reference.java |  21 +--
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |   2 +-
 .../hadoop/hbase/io/hfile/BlockCacheUtil.java      |  12 +-
 .../io/hfile/BlockCompressedSizePredicator.java    |   2 +-
 .../hbase/io/hfile/CacheableDeserializer.java      |   2 +-
 .../hbase/io/hfile/CompoundBloomFilterWriter.java  |   6 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java     |  63 ++++----
 .../hbase/io/hfile/HFileDataBlockEncoder.java      |   6 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java     |  12 +-
 .../apache/hadoop/hbase/io/hfile/HFileScanner.java |  21 +--
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java     |   6 +-
 .../hadoop/hbase/io/hfile/InlineBlockWriter.java   |   9 +-
 .../hbase/io/hfile/NoOpIndexBlockEncoder.java      |   8 +-
 .../PreviousBlockCompressionRatePredicator.java    |   2 +-
 .../io/hfile/UncompressedBlockSizePredicator.java  |   2 +-
 .../hbase/io/hfile/bucket/BucketAllocator.java     |   5 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |   5 +-
 .../hbase/io/hfile/bucket/ByteBufferIOEngine.java  |   6 +-
 .../hadoop/hbase/io/hfile/bucket/FileIOEngine.java |  10 +-
 .../hbase/io/hfile/bucket/FileMmapIOEngine.java    |   6 +-
 .../hadoop/hbase/io/hfile/bucket/IOEngine.java     |   6 +-
 .../hadoop/hbase/io/util/MemorySizeUtil.java       |   9 +-
 .../apache/hadoop/hbase/ipc/PriorityFunction.java  |   6 +-
 .../java/org/apache/hadoop/hbase/ipc/RpcCall.java  |   2 +-
 .../apache/hadoop/hbase/ipc/RpcCallContext.java    |   2 +-
 .../hadoop/hbase/ipc/RpcSchedulerContext.java      |   3 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java     |  13 +-
 .../hadoop/hbase/ipc/RpcServerInterface.java       |   2 +-
 .../hadoop/hbase/ipc/ServerRpcConnection.java      |   6 +-
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java       |   6 +-
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java   |  12 +-
 .../hadoop/hbase/ipc/SimpleRpcServerResponder.java |   6 +-
 .../hbase/ipc/SimpleServerRpcConnection.java       |   2 +-
 .../hbase/master/AssignmentVerificationReport.java |   5 +-
 .../hadoop/hbase/master/DrainingServerTracker.java |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |   3 +-
 .../apache/hadoop/hbase/master/LoadBalancer.java   |  18 ++-
 .../hadoop/hbase/master/MasterCoprocessorHost.java |  24 +--
 .../hadoop/hbase/master/MasterFileSystem.java      |  10 +-
 .../hadoop/hbase/master/MasterRpcServices.java     |   8 +-
 .../apache/hadoop/hbase/master/MasterServices.java |  42 +++---
 .../hbase/master/MetricsAssignmentManager.java     |   8 +-
 .../hbase/master/RegionPlacementMaintainer.java    |  17 ++-
 .../apache/hadoop/hbase/master/ServerManager.java  |  18 +--
 .../hadoop/hbase/master/SplitLogManager.java       |   8 +-
 .../hadoop/hbase/master/TableNamespaceManager.java |   2 +-
 .../assignment/MergeTableRegionsProcedure.java     |   2 +-
 .../hbase/master/balancer/LoadBalancerFactory.java |   3 +-
 .../master/balancer/RegionLocationFinder.java      |   2 +-
 .../master/procedure/CloneSnapshotProcedure.java   |  10 +-
 .../master/procedure/CreateNamespaceProcedure.java |  12 +-
 .../master/procedure/DeleteNamespaceProcedure.java |  20 +--
 .../master/procedure/EnableTableProcedure.java     |  12 +-
 .../master/procedure/ModifyNamespaceProcedure.java |   8 +-
 .../master/procedure/ModifyTableProcedure.java     |   2 +-
 .../master/procedure/RestoreSnapshotProcedure.java |  10 +-
 .../hbase/master/snapshot/SnapshotManager.java     |  29 ++--
 .../hadoop/hbase/mob/DefaultMobStoreFlusher.java   |   2 +-
 .../java/org/apache/hadoop/hbase/mob/MobFile.java  |  12 +-
 .../org/apache/hadoop/hbase/mob/MobFileCache.java  |   2 +-
 .../org/apache/hadoop/hbase/mob/MobFileName.java   |  20 +--
 .../java/org/apache/hadoop/hbase/mob/MobUtils.java |   1 +
 .../hadoop/hbase/monitoring/ThreadMonitoring.java  |   2 +-
 .../hbase/namespace/NamespaceStateManager.java     |   4 +-
 .../hbase/procedure/MasterProcedureManager.java    |   8 +-
 .../apache/hadoop/hbase/procedure/Procedure.java   |  14 +-
 .../hbase/procedure/ProcedureCoordinator.java      |  13 +-
 .../hbase/procedure/ProcedureCoordinatorRpcs.java  |   4 +-
 .../hadoop/hbase/procedure/ProcedureMember.java    |  12 +-
 .../procedure/RegionServerProcedureManager.java    |   4 +-
 .../hadoop/hbase/procedure/Subprocedure.java       |  12 +-
 .../hadoop/hbase/procedure/ZKProcedureUtil.java    |   2 +-
 .../RegionServerFlushTableProcedureManager.java    |  11 +-
 .../hbase/protobuf/ReplicationProtbufUtil.java     |   4 +-
 .../hbase/regionserver/AbstractMemStore.java       |   2 +-
 .../AnnotationReadingPriorityFunction.java         |  15 +-
 .../apache/hadoop/hbase/regionserver/CellSink.java |   2 +-
 .../hadoop/hbase/regionserver/ChunkCreator.java    |   2 +-
 .../hbase/regionserver/CompactingMemStore.java     |   2 +-
 .../hbase/regionserver/FavoredNodesForRegion.java  |   6 +-
 .../hadoop/hbase/regionserver/FlushRequester.java  |   8 +-
 .../hadoop/hbase/regionserver/HMobStore.java       |  18 +--
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  66 ++++----
 .../hbase/regionserver/HRegionFileSystem.java      |  42 +++---
 .../hadoop/hbase/regionserver/HRegionServer.java   |   8 +-
 .../hadoop/hbase/regionserver/HStoreFile.java      |   6 +-
 .../hadoop/hbase/regionserver/HeapMemoryTuner.java |   4 +-
 .../hadoop/hbase/regionserver/InternalScan.java    |   2 +-
 .../hadoop/hbase/regionserver/InternalScanner.java |   6 +-
 .../hadoop/hbase/regionserver/KeyValueHeap.java    |  14 +-
 .../hadoop/hbase/regionserver/KeyValueScanner.java |   2 +-
 .../apache/hadoop/hbase/regionserver/MemStore.java |  17 ++-
 .../hadoop/hbase/regionserver/MemStoreFlusher.java |  20 ++-
 .../regionserver/MiniBatchOperationInProgress.java |  16 +-
 .../MultiVersionConcurrencyControl.java            |   4 +-
 .../hbase/regionserver/MutableOnlineRegions.java   |   2 +-
 .../hadoop/hbase/regionserver/OnlineRegions.java   |   8 +-
 .../hadoop/hbase/regionserver/OperationStatus.java |   9 +-
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  32 ++--
 .../apache/hadoop/hbase/regionserver/Region.java   |  33 ++--
 .../hbase/regionserver/RegionCoprocessorHost.java  |  36 ++---
 .../hadoop/hbase/regionserver/RegionScanner.java   |   2 +-
 .../hbase/regionserver/RegionSplitPolicy.java      |   3 +-
 .../hbase/regionserver/ReplicationSinkService.java |   1 -
 .../hbase/regionserver/ReversedKeyValueHeap.java   |   8 +-
 .../hbase/regionserver/ReversedStoreScanner.java   |   5 +-
 .../hadoop/hbase/regionserver/RowProcessor.java    |   4 +-
 .../apache/hadoop/hbase/regionserver/ScanInfo.java |   4 +-
 .../hadoop/hbase/regionserver/ScannerContext.java  |  34 ++---
 .../hbase/regionserver/SecureBulkLoadManager.java  |   4 +-
 .../hadoop/hbase/regionserver/SegmentFactory.java  |   2 +-
 .../hadoop/hbase/regionserver/ShipperListener.java |   2 +-
 .../hadoop/hbase/regionserver/ShutdownHook.java    |   6 +-
 .../apache/hadoop/hbase/regionserver/Store.java    |   2 +-
 .../hadoop/hbase/regionserver/StoreFileInfo.java   |   4 +-
 .../hadoop/hbase/regionserver/StoreFileReader.java |   6 +-
 .../hbase/regionserver/StoreFileScanner.java       |   4 +-
 .../hbase/regionserver/StoreFlushContext.java      |   6 +-
 .../hadoop/hbase/regionserver/StoreScanner.java    |  19 +--
 .../hbase/regionserver/TimeRangeTracker.java       |   2 +-
 .../compactions/CompactionProgress.java            |   2 +-
 .../compactions/ExploringCompactionPolicy.java     |   8 +-
 .../compactions/SortedCompactionPolicy.java        |   4 +-
 .../regionserver/querymatcher/ColumnTracker.java   |   7 +-
 .../querymatcher/ScanDeleteTracker.java            |   2 +-
 .../querymatcher/ScanQueryMatcher.java             |   6 +-
 .../querymatcher/ScanWildcardColumnTracker.java    |   2 +-
 .../snapshot/RegionServerSnapshotManager.java      |  16 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java      |   4 +-
 .../hbase/regionserver/wal/ProtobufLogReader.java  |   2 +-
 .../regionserver/wal/SequenceIdAccounting.java     |   7 +-
 .../hadoop/hbase/regionserver/wal/SyncFuture.java  |   2 +-
 .../replication/HBaseReplicationEndpoint.java      |   2 +-
 .../regionserver/DumpReplicationQueues.java        |   2 +-
 .../replication/regionserver/MetricsSink.java      |  10 +-
 .../replication/regionserver/MetricsSource.java    |  18 +--
 .../replication/regionserver/Replication.java      |   1 -
 .../replication/regionserver/ReplicationLoad.java  |   2 +-
 .../replication/regionserver/ReplicationSink.java  |   8 +-
 .../regionserver/ReplicationSinkManager.java       |   6 +-
 .../regionserver/ReplicationSourceManager.java     |   2 +-
 .../hbase/security/access/AccessChecker.java       |   2 +-
 .../hbase/security/access/AccessController.java    |   8 +-
 .../hadoop/hbase/security/access/AuthManager.java  |   2 +-
 .../hbase/security/access/ZKPermissionWatcher.java |   4 +-
 .../DefaultVisibilityLabelServiceImpl.java         |   5 +-
 .../security/visibility/ScanLabelGenerator.java    |   3 +-
 .../security/visibility/VisibilityController.java  |   4 +-
 .../visibility/VisibilityLabelService.java         |  43 +++---
 .../visibility/VisibilityLabelServiceManager.java  |   5 +-
 .../security/visibility/VisibilityLabelsCache.java |   6 +-
 .../hbase/security/visibility/VisibilityUtils.java |  27 ++--
 .../visibility/ZKVisibilityLabelWatcher.java       |   4 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java      |   6 +-
 .../hbase/snapshot/SnapshotDescriptionUtils.java   |   3 +-
 .../org/apache/hadoop/hbase/util/BloomContext.java |   4 +-
 .../org/apache/hadoop/hbase/util/BloomFilter.java  |   4 +-
 .../apache/hadoop/hbase/util/BloomFilterChunk.java |   2 +-
 .../hadoop/hbase/util/BloomFilterFactory.java      |  18 +--
 .../apache/hadoop/hbase/util/BloomFilterUtil.java  |  27 ++--
 .../hadoop/hbase/util/DirectMemoryUtils.java       |   4 +-
 .../apache/hadoop/hbase/util/EncryptionTest.java   |   3 +-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  55 ++++---
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |  20 +--
 .../apache/hadoop/hbase/util/HBaseFsckRepair.java  |   2 +-
 .../apache/hadoop/hbase/util/JVMClusterUtil.java   |  11 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java       |   8 +-
 .../hadoop/hbase/util/MunkresAssignment.java       |   2 +-
 .../org/apache/hadoop/hbase/util/RegionMover.java  |   6 +-
 .../hadoop/hbase/util/RegionSplitCalculator.java   |   2 +-
 .../apache/hadoop/hbase/util/RegionSplitter.java   |  28 ++--
 .../hadoop/hbase/util/RollingStatCalculator.java   |  10 +-
 .../apache/hadoop/hbase/util/ZKDataMigrator.java   |   3 +-
 .../hbase/util/hbck/HFileCorruptionChecker.java    |  22 +--
 .../main/java/org/apache/hadoop/hbase/wal/WAL.java |  10 +-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java    |  12 +-
 .../apache/hadoop/hbase/wal/WALPrettyPrinter.java  |  53 ++++---
 .../java/org/apache/hadoop/hbase/HBaseCluster.java |   2 +-
 .../org/apache/hadoop/hbase/HBaseTestCase.java     |  21 +--
 .../apache/hadoop/hbase/HBaseTestingUtility.java   | 166 ++++++++++++---------
 .../hadoop/hbase/HFilePerformanceEvaluation.java   |  16 +-
 .../org/apache/hadoop/hbase/MetaMockingUtil.java   |   6 +-
 .../org/apache/hadoop/hbase/MiniHBaseCluster.java  |  32 ++--
 .../hadoop/hbase/TestGlobalMemStoreSize.java       |   4 +-
 .../TestHColumnDescriptorDefaultVersions.java      |   2 +-
 .../org/apache/hadoop/hbase/TestMultiVersions.java |   2 +-
 .../hbase/TestPartialResultsFromClientSide.java    |  28 ++--
 .../apache/hadoop/hbase/TestRegionRebalancing.java |   2 +-
 .../org/apache/hadoop/hbase/TestSerialization.java |   2 +-
 .../org/apache/hadoop/hbase/TimestampTestBase.java |  11 +-
 .../hadoop/hbase/backup/TestHFileArchiving.java    |   6 +-
 .../hadoop/hbase/client/FromClientSideBase.java    |   8 +-
 .../hbase/client/HConnectionTestingUtility.java    |   6 +-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |  12 +-
 .../hadoop/hbase/client/TestClientTimeouts.java    |   2 +-
 .../hbase/client/TestConnectionImplementation.java |   6 +-
 .../hadoop/hbase/client/TestEnableTable.java       |   2 +-
 .../hadoop/hbase/client/TestFromClientSide3.java   |   2 +-
 .../client/TestFromClientSideScanExcpetion.java    |   2 +-
 .../hbase/client/TestIntraRowPagination.java       |   2 +-
 .../apache/hadoop/hbase/client/TestMetaCache.java  |   2 +-
 .../org/apache/hadoop/hbase/client/TestResult.java |   3 +-
 .../hadoop/hbase/client/TestScannerTimeout.java    |   6 +-
 .../hbase/client/TestScannersFromClientSide.java   |  12 +-
 .../hadoop/hbase/client/TestSizeFailures.java      |   2 +-
 .../hbase/client/TestSmallReversedScanner.java     |   4 +-
 .../hbase/client/TestSnapshotFromClient.java       |   5 +-
 .../hadoop/hbase/client/TestSnapshotMetadata.java  |   2 +-
 .../hadoop/hbase/client/TestTimestampsFilter.java  |   4 +-
 .../hbase/client/locking/TestEntityLocks.java      |   2 +-
 .../client/replication/TestReplicationAdmin.java   |   3 +-
 .../hadoop/hbase/constraint/TestConstraint.java    |  10 +-
 .../hadoop/hbase/constraint/TestConstraints.java   |   6 +-
 .../coprocessor/TestCoreMasterCoprocessor.java     |   2 +-
 .../coprocessor/TestCoreRegionCoprocessor.java     |   2 +-
 .../TestCoreRegionServerCoprocessor.java           |   2 +-
 .../coprocessor/TestOpenTableInCoprocessor.java    |   2 +-
 .../coprocessor/TestRegionObserverBypass.java      |   4 +-
 ...ObserverForAddingMutationsFromCoprocessors.java |   2 +-
 .../coprocessor/TestRegionObserverInterface.java   |   2 +-
 .../hbase/filter/TestColumnPaginationFilter.java   |   4 +-
 .../hbase/filter/TestDependentColumnFilter.java    |   4 +-
 .../org/apache/hadoop/hbase/filter/TestFilter.java |   8 +-
 .../apache/hadoop/hbase/filter/TestFilterList.java |  14 +-
 .../filter/TestFilterListOrOperatorWithBlkCnt.java |   6 +-
 .../hbase/filter/TestInclusiveStopFilter.java      |   4 +-
 .../hbase/filter/TestMultiRowRangeFilter.java      |   6 +-
 .../apache/hadoop/hbase/filter/TestPageFilter.java |   4 +-
 .../hadoop/hbase/filter/TestRandomRowFilter.java   |   4 +-
 .../filter/TestSingleColumnValueExcludeFilter.java |   2 +-
 .../hbase/filter/TestSingleColumnValueFilter.java  |   4 +-
 .../hadoop/hbase/io/TestHalfStoreFileReader.java   |   2 +-
 .../org/apache/hadoop/hbase/io/TestHeapSize.java   |   2 +-
 .../hbase/io/encoding/TestDataBlockEncoders.java   |   6 +-
 .../apache/hadoop/hbase/io/hfile/NanoTimer.java    |   6 +-
 .../hadoop/hbase/io/hfile/RandomKeyValueUtil.java  |   3 +-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java    |   4 +-
 .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java |   3 +-
 .../hbase/io/hfile/TestHFileDataBlockEncoder.java  |   4 +-
 .../hbase/io/hfile/bucket/TestBucketCache.java     |   3 +-
 .../io/hfile/bucket/TestBucketCacheRefCnt.java     |   2 -
 .../io/hfile/bucket/TestBucketWriterThread.java    |   8 +-
 .../hadoop/hbase/master/MockRegionServer.java      |   5 +-
 .../hbase/master/TestActiveMasterManager.java      |   2 +-
 .../TestMasterFailoverBalancerPersistence.java     |   6 +-
 .../hadoop/hbase/master/TestMasterNoCluster.java   |   6 +-
 .../hadoop/hbase/master/TestMasterTransitions.java |  10 +-
 .../hadoop/hbase/master/TestRegionPlacement.java   |  10 +-
 .../hbase/master/balancer/BalancerTestBase.java    |   4 +-
 .../master/balancer/TestBaseLoadBalancer.java      |   5 +-
 .../master/balancer/TestSimpleLoadBalancer.java    |   2 +-
 .../balancer/TestStochasticBalancerJmxMetrics.java |   2 +-
 .../hbase/master/janitor/TestCatalogJanitor.java   |   2 +-
 .../janitor/TestCatalogJanitorInMemoryStates.java  |   4 +-
 .../procedure/MasterProcedureTestingUtility.java   |   2 +-
 .../TestTableDescriptorModificationFromClient.java |   2 +-
 .../org/apache/hadoop/hbase/mob/MobTestUtil.java   |   2 +-
 .../hbase/procedure/SimpleRSProcedureManager.java  |   2 +-
 .../procedure/TestZKProcedureControllers.java      |   4 +-
 .../hadoop/hbase/protobuf/TestProtobufUtil.java    |  12 +-
 .../hbase/protobuf/TestReplicationProtobuf.java    |   2 +-
 .../hbase/regionserver/CreateRandomStoreFile.java  |   2 +-
 .../hbase/regionserver/DataBlockEncodingTool.java  |   4 +-
 .../hbase/regionserver/TestCompactingMemStore.java |  10 +-
 .../hadoop/hbase/regionserver/TestCompaction.java  |   2 +-
 .../hbase/regionserver/TestCompactionState.java    |   5 +-
 .../TestDateTieredCompactionPolicy.java            |   2 +-
 .../hbase/regionserver/TestDefaultMemStore.java    |  18 +--
 .../hbase/regionserver/TestDeleteMobTable.java     |   2 +-
 .../regionserver/TestGetClosestAtOrBefore.java     |   4 +-
 .../hadoop/hbase/regionserver/TestHMobStore.java   |  12 +-
 .../hadoop/hbase/regionserver/TestHRegion.java     |  29 ++--
 .../regionserver/TestHRegionReplayEvents.java      |   4 +-
 .../hadoop/hbase/regionserver/TestHStoreFile.java  |   2 +-
 .../hbase/regionserver/TestJoinedScanners.java     |   3 +-
 .../hbase/regionserver/TestMajorCompaction.java    |   4 +-
 .../regionserver/TestPerColumnFamilyFlush.java     |   2 +-
 .../regionserver/TestRSKilledWhenInitializing.java |   2 +-
 .../hbase/regionserver/TestRegionIncrement.java    |   4 +-
 .../regionserver/TestRegionReplicaFailover.java    |   2 +-
 .../TestRegionServerOnlineConfigChange.java        |   2 +-
 .../regionserver/TestRequestsPerSecondMetric.java  |   2 +-
 .../hadoop/hbase/regionserver/TestRowTooBig.java   |   4 +-
 .../hadoop/hbase/regionserver/TestScanner.java     |   2 +-
 .../TestSplitTransactionOnCluster.java             |   7 +-
 .../hbase/regionserver/TestStoreScanner.java       |   4 +-
 .../querymatcher/TestUserScanQueryMatcher.java     |   6 +-
 .../hbase/regionserver/wal/AbstractTestFSWAL.java  |   8 +-
 .../regionserver/wal/AbstractTestProtobufLog.java  |   4 +-
 .../regionserver/wal/AbstractTestWALReplay.java    |  20 ++-
 .../hbase/regionserver/wal/TestLogRolling.java     |   2 +-
 .../regionserver/wal/TestLogRollingNoCluster.java  |   2 +-
 .../TestReplicationDisableInactivePeer.java        |   2 +-
 .../regionserver/TestReplicationSink.java          |  10 +-
 .../regionserver/TestReplicationSourceManager.java |   8 +-
 .../regionserver/TestWALEntrySinkFilter.java       |   2 +-
 .../hbase/security/AbstractTestSecureIPC.java      |   2 +-
 .../TestUsersOperationsWithSecureHadoop.java       |   2 +-
 .../hbase/snapshot/SnapshotTestingUtils.java       |   2 +-
 .../TestLoadIncrementalHFilesSplitRecovery.java    |   4 +-
 .../hadoop/hbase/util/BaseTestHBaseFsck.java       |  13 +-
 .../hadoop/hbase/util/HFileArchiveTestingUtil.java |   4 +-
 .../org/apache/hadoop/hbase/util/MockServer.java   |   2 +-
 .../hbase/util/ProcessBasedLocalHBaseCluster.java  |   2 +-
 .../org/apache/hadoop/hbase/util/TestFSUtils.java  |   2 +-
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java    |   2 +-
 .../hbase/util/test/LoadTestDataGenerator.java     |  10 +-
 .../apache/hadoop/hbase/wal/IOTestProvider.java    |   2 +-
 .../hadoop/hbase/wal/TestFSHLogProvider.java       |   7 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java    |   6 +-
 .../apache/hadoop/hbase/wal/TestWALMethods.java    |   2 +-
 .../hbase/wal/TestWALOpenAfterDNRollingStart.java  |   2 +-
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java  |   4 +-
 .../hadoop/hbase/wal/WALPerformanceEvaluation.java |   5 +-
 .../hadoop/hbase/thrift/HBaseServiceHandler.java   |   2 +-
 .../hadoop/hbase/thrift/IncrementCoalescer.java    |   2 +-
 .../hbase/thrift/ThriftHBaseServiceHandler.java    |   2 +-
 .../hadoop/hbase/thrift/ThriftUtilities.java       |  20 +--
 .../hadoop/hbase/thrift/TestThriftServer.java      |  20 +--
 .../TestThriftHBaseServiceHandlerWithLabels.java   |   5 +-
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   |   4 +-
 .../hbase/zookeeper/RecoverableZooKeeper.java      |   5 +-
 .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java  |  30 ++--
 572 files changed, 2564 insertions(+), 2516 deletions(-)

diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
index 4ac46e8cc5d..00b6631379b 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
@@ -368,7 +368,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
        * Create a ByteString from byte array without copying (wrap), and then set it as the payload
        * for the builder.
        * @param builder builder for HDFS DataTransferEncryptorMessage.
-       * @param payload byte array of payload. n
+       * @param payload byte array of payload.
        */
       static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
         byte[] payload) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
index 8c675c4522e..67438677dad 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
@@ -53,7 +53,8 @@ public class ClusterId {
   /**
    * Parse the serialized representation of the {@link ClusterId}
    * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
-   * @return An instance of {@link ClusterId} made from <code>bytes</code> n * @see #toByteArray()
+   * @return An instance of {@link ClusterId} made from <code>bytes</code>
+   * @see #toByteArray()
    */
   public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
     if (ProtobufUtil.isPBMagicPrefix(bytes)) {
@@ -79,9 +80,7 @@ public class ClusterId {
     return builder.setClusterId(this.id).build();
   }
 
-  /**
-   * n * @return A {@link ClusterId} made from the passed in <code>cid</code>
-   */
+  /** Returns A {@link ClusterId} made from the passed in <code>cid</code> */
   public static ClusterId convert(final ClusterIdProtos.ClusterId cid) {
     return new ClusterId(cid.getClusterId());
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index f1e8d4bf147..d21d610126a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -275,7 +275,7 @@ public class ClusterStatus implements ClusterMetrics {
   }
 
   /**
-   * n * @return Server's load or null if not found.
+   * @return Server's load or null if not found.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link #getLiveServerMetrics} instead.
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 0a25fe2dfea..d2d39a4c415 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -356,8 +356,8 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
 
   /**
    * Set whether the tags should be compressed along with DataBlockEncoding. When no
-   * DataBlockEncoding is been used, this is having no effect. n * @return this (for chained
-   * invocation)
+   * DataBlockEncoding is been used, this is having no effect.
+   * @return this (for chained invocation)
    */
   public HColumnDescriptor setCompressTags(boolean value) {
     getDelegateeForModification().setCompressTags(value);
@@ -687,8 +687,8 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
   /**
    * Parse a serialized representation of a {@link HColumnDescriptor}
    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
-   * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code> n * @see
-   *         #toByteArray()
+   * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
+   * @see #toByteArray()
    */
   public static HColumnDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
     ColumnFamilyDescriptor desc = ColumnFamilyDescriptorBuilder.parseFrom(bytes);
@@ -732,7 +732,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
   }
 
   /**
-   * Set the encryption algorithm for use with this family n
+   * Set the encryption algorithm for use with this family
    */
   public HColumnDescriptor setEncryptionType(String value) {
     getDelegateeForModification().setEncryptionType(value);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 2f4d6377888..33d7d98c61e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -94,7 +94,7 @@ public class HRegionInfo implements RegionInfo {
   private static final int MAX_REPLICA_ID = 0xFFFF;
 
   /**
-   * n * @return the encodedName
+   * @return the encodedName
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#encodeRegionName(byte[])}.
    */
@@ -212,7 +212,7 @@ public class HRegionInfo implements RegionInfo {
    * Construct HRegionInfo with explicit parameters
    * @param tableName the table name
    * @param startKey  first key in region
-   * @param endKey    end of key range n
+   * @param endKey    end of key range
    */
   public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey)
     throws IllegalArgumentException {
@@ -225,7 +225,7 @@ public class HRegionInfo implements RegionInfo {
    * @param startKey  first key in region
    * @param endKey    end of key range
    * @param split     true if this region has split and we have daughter regions regions that may or
-   *                  may not hold references to this region. n
+   *                  may not hold references to this region.
    */
   public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
     final boolean split) throws IllegalArgumentException {
@@ -405,7 +405,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Gets the start key from the specified region name. n * @return Start key.
+   * Gets the start key from the specified region name.
+   * @return Start key.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#getStartKey(byte[])}.
    */
@@ -415,9 +416,10 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Separate elements of a regionName. n * @return Array of byte[] containing tableName, startKey
-   * and id n * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
-   * {@link RegionInfo#parseRegionName(byte[])}.
+   * Separate elements of a regionName.
+   * @return Array of byte[] containing tableName, startKey and id
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
+   *             {@link RegionInfo#parseRegionName(byte[])}.
    */
   @Deprecated
   @InterfaceAudience.Private
@@ -426,9 +428,9 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * n * @return if region name is encoded. n * @deprecated As of release 2.0.0, this will be
-   * removed in HBase 3.0.0 Use
-   * {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}.
+   * @return if region name is encoded.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
+   *             {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}.
    */
   @Deprecated
   public static boolean isEncodedRegionName(byte[] regionName) throws IOException {
@@ -494,7 +496,7 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get current table name of the region n
+   * Get current table name of the region
    */
   @Override
   public TableName getTable() {
@@ -741,7 +743,8 @@ public class HRegionInfo implements RegionInfo {
   /**
    * Parse a serialized representation of a {@link HRegionInfo}.
    * @param bytes A pb RegionInfo serialized with a pb magic prefix.
-   * @return A deserialized {@link HRegionInfo} n * @see #toByteArray()
+   * @return A deserialized {@link HRegionInfo}
+   * @see #toByteArray()
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[])}.
    */
@@ -755,7 +758,8 @@ public class HRegionInfo implements RegionInfo {
    * @param bytes  A pb RegionInfo serialized with a pb magic prefix.
    * @param offset starting point in the byte array
    * @param len    length to read on the byte array
-   * @return A deserialized {@link HRegionInfo} n * @see #toByteArray()
+   * @return A deserialized {@link HRegionInfo}
+   * @see #toByteArray()
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[], int, int)}.
    */
@@ -780,8 +784,8 @@ public class HRegionInfo implements RegionInfo {
   /**
    * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use the pb
    * mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want).
-   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n * @see
-   *         #toByteArray()
+   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
+   * @see #toByteArray()
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link RegionInfo#toDelimitedByteArray(RegionInfo)}.
    */
@@ -791,8 +795,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally nn
-   * * @return descriptive string
+   * Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally
+   * @return descriptive string
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getDescriptiveNameFromRegionStateForDisplay(RegionState,
    *             Configuration) over in hbase-server module.
@@ -805,7 +809,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the end key for display. Optionally hide the real end key. nn * @return the endkey
+   * Get the end key for display. Optionally hide the real end key.
+   * @return the endkey
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getEndKeyForDisplay(RegionInfo, Configuration) over in
    *             hbase-server module.
@@ -817,7 +822,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the start key for display. Optionally hide the real start key. nn * @return the startkey
+   * Get the start key for display. Optionally hide the real start key.
+   * @return the startkey
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getStartKeyForDisplay(RegionInfo, Configuration) over in
    *             hbase-server module.
@@ -829,8 +835,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the region name for display. Optionally hide the start key. nn * @return region name as
-   * String
+   * Get the region name for display. Optionally hide the start key.
+   * @return region name as String
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getRegionNameAsStringForDisplay(RegionInfo, Configuration) over
    *             in hbase-server module.
@@ -842,7 +848,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get the region name for display. Optionally hide the start key. nn * @return region name bytes
+   * Get the region name for display. Optionally hide the start key.
+   * @return region name bytes
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             RegionInfoDisplay#getRegionNameForDisplay(RegionInfo, Configuration) over in
    *             hbase-server module.
@@ -855,9 +862,10 @@ public class HRegionInfo implements RegionInfo {
 
   /**
    * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was
-   * serialized to the stream with {@link #toDelimitedByteArray()} n * @return An instance of
-   * HRegionInfo. n * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
-   * {@link RegionInfo#parseFrom(DataInputStream)}.
+   * serialized to the stream with {@link #toDelimitedByteArray()}
+   * @return An instance of HRegionInfo.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
+   *             {@link RegionInfo#parseFrom(DataInputStream)}.
    */
   @Deprecated
   @InterfaceAudience.Private
@@ -885,8 +893,8 @@ public class HRegionInfo implements RegionInfo {
    * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can be
    * used to read back the instances.
    * @param infos HRegionInfo objects to serialize
-   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n * @see
-   *         #toByteArray()
+   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
+   * @see #toByteArray()
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link RegionInfo#toDelimitedByteArray(RegionInfo...)}.
    */
@@ -927,7 +935,8 @@ public class HRegionInfo implements RegionInfo {
   }
 
   /**
-   * Check whether two regions are adjacent nn * @return true if two regions are adjacent
+   * Check whether two regions are adjacent
+   * @return true if two regions are adjacent
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link org.apache.hadoop.hbase.client.RegionInfo#areAdjacent(RegionInfo, RegionInfo)}.
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index a7720b2734e..3180baa17a6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -82,7 +82,7 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
   }
 
   /**
-   * @return Immutable HRegionInfo
+   * Returns immutable HRegionInfo
    * @deprecated Since 2.0.0. Will remove in 3.0.0. Use {@link #getRegion()}} instead.
    */
   @Deprecated
@@ -90,9 +90,7 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
     return regionInfo == null ? null : new ImmutableHRegionInfo(regionInfo);
   }
 
-  /**
-   * n
-   */
+  /** Returns regionInfo */
   public RegionInfo getRegion() {
     return regionInfo;
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index f5448e61737..808cb5a4060 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -356,7 +356,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
   }
 
   /**
-   * Get the name of the table n
+   * Get the name of the table
    */
   @Override
   public TableName getTableName() {
@@ -728,7 +728,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. It won't check if the class can be
    * loaded or not. Whether a coprocessor is loadable or not will be determined when a region is
    * opened.
-   * @param className Full class name. n
+   * @param className Full class name.
    */
   public HTableDescriptor addCoprocessor(String className) throws IOException {
     getDelegateeForModification().setCoprocessor(className);
@@ -744,7 +744,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    *                    classloader.
    * @param className   Full class name.
    * @param priority    Priority
-   * @param kvs         Arbitrary key-value parameter pairs passed into the coprocessor. n
+   * @param kvs         Arbitrary key-value parameter pairs passed into the coprocessor.
    */
   public HTableDescriptor addCoprocessor(String className, Path jarFilePath, int priority,
     final Map<String, String> kvs) throws IOException {
@@ -760,7 +760,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
    * loaded or not. Whether a coprocessor is loadable or not will be determined when a region is
    * opened.
    * @param specStr The Coprocessor specification all in in one String formatted so matches
-   *                {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN} n
+   *                {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
    */
   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
     getDelegateeForModification().setCoprocessorWithSpec(specStr);
@@ -844,8 +844,8 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
   /**
    * Parse the serialized representation of a {@link HTableDescriptor}
    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
-   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code> nn * @see
-   *         #toByteArray()
+   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
+   * @see #toByteArray()
    */
   public static HTableDescriptor parseFrom(final byte[] bytes)
     throws DeserializationException, IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
index a15833ac17a..bc156353a1b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
@@ -34,8 +34,7 @@ public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException {
   }
 
   /**
-   * n
-   */
+   * @param message the error message
+   */
   public NotAllMetaRegionsOnlineException(String message) {
     super(message);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 714a1412553..eec0ac5cdca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -393,7 +393,7 @@ public class ServerLoad implements ServerMetrics {
   }
 
   /**
-   * Call directly from client such as hbase shell n
+   * Called directly from clients such as the hbase shell.
    */
   @Override
   public ReplicationLoadSink getReplicationLoadSink() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index 5cf190c04f9..0a2dd28f6f8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -70,7 +70,7 @@ public interface ServerMetrics {
   Map<String, List<ReplicationLoadSource>> getReplicationLoadSourceMap();
 
   /**
-   * Call directly from client such as hbase shell n
+   * Called directly from clients such as the hbase shell.
    */
   @Nullable
   ReplicationLoadSink getReplicationLoadSink();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f812d4f478f..5d9f54ad9e1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -533,9 +533,10 @@ public interface Admin extends Abortable, Closeable {
    * Disable table and wait on completion. May timeout eventually. Use
    * {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
    * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
-   * enabled state for it to be disabled. n * @throws IOException There could be couple types of
-   * IOException TableNotFoundException means the table doesn't exist. TableNotEnabledException
-   * means the table isn't in enabled state.
+   * enabled state for it to be disabled.
+   * @throws IOException There could be a couple of types of IOException: TableNotFoundException
+   *                     means the table doesn't exist; TableNotEnabledException means the table
+   *                     isn't in the enabled state.
    */
   default void disableTable(TableName tableName) throws IOException {
     get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
@@ -934,7 +935,7 @@ public interface Admin extends Abortable, Closeable {
    * then it returns. It does not wait on the completion of Compaction (it can take a while).
    * @param tableName   table to compact
    * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   void compact(TableName tableName, CompactType compactType)
     throws IOException, InterruptedException;
@@ -946,7 +947,7 @@ public interface Admin extends Abortable, Closeable {
    * @param tableName    table to compact
    * @param columnFamily column family within a table
    * @param compactType  {@link org.apache.hadoop.hbase.client.CompactType}
-   * @throws IOException if not a mob column family or if a remote or network exception occurs n
+   * @throws IOException if not a mob column family or if a remote or network exception occurs
    */
   void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
     throws IOException, InterruptedException;
@@ -995,7 +996,7 @@ public interface Admin extends Abortable, Closeable {
    * while).
    * @param tableName   table to compact
    * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   void majorCompact(TableName tableName, CompactType compactType)
     throws IOException, InterruptedException;
@@ -1007,7 +1008,7 @@ public interface Admin extends Abortable, Closeable {
    * @param tableName    table to compact
    * @param columnFamily column family within a table
    * @param compactType  {@link org.apache.hadoop.hbase.client.CompactType}
-   * @throws IOException if not a mob column family or if a remote or network exception occurs n
+   * @throws IOException if not a mob column family or if a remote or network exception occurs
    */
   void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
     throws IOException, InterruptedException;
@@ -1018,10 +1019,10 @@ public interface Admin extends Abortable, Closeable {
    * can take a while).
    * @param sn    the region server name
    * @param major if it's major compaction
-   * @throws IOException if a remote or network exception occurs n * @deprecated As of release
-   *                     2.0.0, this will be removed in HBase 3.0.0. Use
-   *                     {@link #compactRegionServer(ServerName)} or
-   *                     {@link #majorCompactRegionServer(ServerName)}.
+   * @throws IOException if a remote or network exception occurs
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
+   *             {@link #compactRegionServer(ServerName)} or
+   *             {@link #majorCompactRegionServer(ServerName)}.
    */
   @Deprecated
   default void compactRegionServer(ServerName sn, boolean major)
@@ -2668,7 +2669,7 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Return the set of supported security capabilities.
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   List<SecurityCapability> getSecurityCapabilities() throws IOException;
 
@@ -3012,7 +3013,7 @@ public interface Admin extends Abortable, Closeable {
    * Clear compacting queues on a regionserver.
    * @param serverName the region server name
    * @param queues     the set of queue name
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   void clearCompactionQueues(ServerName serverName, Set<String> queues)
     throws IOException, InterruptedException;
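
For the CompactType overloads documented above, a minimal sketch of queueing a MOB
compaction; it assumes a running cluster and a table "t1" whose family "cf" is MOB-enabled:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactType;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobCompactSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
          Admin admin = conn.getAdmin()) {
          // Queues the compaction and returns; it does not wait for completion.
          admin.compact(TableName.valueOf("t1"), Bytes.toBytes("cf"), CompactType.MOB);
        }
      }
    }
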
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 31a57eacf4d..8a49ef6ab78 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -61,7 +61,7 @@ public class Append extends Mutation {
    * <p>
    * This range is used as [minStamp, maxStamp).
    * @param minStamp minimum timestamp value, inclusive
-   * @param maxStamp maximum timestamp value, exclusive n
+   * @param maxStamp maximum timestamp value, exclusive
    */
   public Append setTimeRange(long minStamp, long maxStamp) {
     tr = new TimeRange(minStamp, maxStamp);
@@ -69,7 +69,7 @@ public class Append extends Mutation {
   }
 
   /**
-   * Gets the TimeRange used for this append. n
+   * Gets the TimeRange used for this append.
    */
   public TimeRange getTimeRange() {
     return this.tr;
@@ -81,7 +81,7 @@ public class Append extends Mutation {
   }
 
   /**
-   * n * True (default) if the append operation should return the results. A client that is not
+   * True (default) if the append operation should return the results. A client that is not
    * interested in the result can save network bandwidth setting this to false.
    */
   @Override
@@ -120,7 +120,7 @@ public class Append extends Mutation {
    * Create an Append operation for the specified row.
    * <p>
    * At least one column must be appended to.
-   * @param rowArray Makes a copy out of this buffer. nn
+   * @param rowArray Makes a copy out of this buffer.
    */
   public Append(final byte[] rowArray, final int rowOffset, final int rowLength) {
     checkRow(rowArray, rowOffset, rowLength);
@@ -142,9 +142,9 @@ public class Append extends Mutation {
    * Add the specified column and value to this Append operation.
    * @param family    family name
    * @param qualifier column qualifier
-   * @param value     value to append to specified column n * @deprecated As of release 2.0.0, this
-   *                  will be removed in HBase 3.0.0. Use {@link #addColumn(byte[], byte[], byte[])}
-   *                  instead
+   * @param value     value to append to specified column
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
+   *             {@link #addColumn(byte[], byte[], byte[])} instead
    */
   @Deprecated
   public Append add(byte[] family, byte[] qualifier, byte[] value) {
@@ -155,7 +155,7 @@ public class Append extends Mutation {
    * Add the specified column and value to this Append operation.
    * @param family    family name
    * @param qualifier column qualifier
-   * @param value     value to append to specified column n
+   * @param value     value to append to specified column
    */
   public Append addColumn(byte[] family, byte[] qualifier, byte[] value) {
     KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value);
@@ -163,7 +163,8 @@ public class Append extends Mutation {
   }
 
   /**
-   * Add column and value to this Append operation. n * @return This instance
+   * Add column and value to this Append operation.
+   * @return This instance
    */
   @SuppressWarnings("unchecked")
   @Override
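
A minimal sketch of the Append API documented above, including setReturnResults(false) to
save the network bandwidth the javadoc mentions; the names are illustrative assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AppendSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
          Table table = conn.getTable(TableName.valueOf("t1"))) {
          Append append = new Append(Bytes.toBytes("row-1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes("|entry"));
          append.setReturnResults(false); // do not ship the appended value back to the client
          table.append(append);
        }
      }
    }
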
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 138b3b8d238..f071e58e5d9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -206,7 +206,7 @@ public interface AsyncAdmin {
   CompletableFuture<Void> enableTable(TableName tableName);
 
   /**
-   * Disable a table. The table has to be in enabled state for it to be disabled. n
+   * Disable a table. The table has to be in enabled state for it to be disabled.
    */
   CompletableFuture<Void> disableTable(TableName tableName);
 
@@ -1134,7 +1134,7 @@ public interface AsyncAdmin {
   CompletableFuture<Void> stopMaster();
 
   /**
-   * Stop the designated regionserver. n
+   * Stop the designated regionserver.
    */
   CompletableFuture<Void> stopRegionServer(ServerName serverName);
 
@@ -1336,8 +1336,8 @@ public interface AsyncAdmin {
   CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp);
 
   /**
-   * Turn the cleaner chore on/off. n * @return Previous cleaner state wrapped by a
-   * {@link CompletableFuture}
+   * Turn the cleaner chore on/off.
+   * @return Previous cleaner state wrapped by a {@link CompletableFuture}
    */
   CompletableFuture<Boolean> cleanerChoreSwitch(boolean on);
 
@@ -1356,8 +1356,8 @@ public interface AsyncAdmin {
   CompletableFuture<Boolean> runCleanerChore();
 
   /**
-   * Turn the catalog janitor on/off. n * @return the previous state wrapped by a
-   * {@link CompletableFuture}
+   * Turn the catalog janitor on/off.
+   * @return the previous state wrapped by a {@link CompletableFuture}
    */
   CompletableFuture<Boolean> catalogJanitorSwitch(boolean on);
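
The switch methods above hand back the previous state wrapped in a CompletableFuture. A
minimal sketch, blocking on the futures for brevity; the class name is an assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ChoreSwitchSketch {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
          ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          AsyncAdmin admin = conn.getAdmin();
          boolean previous = admin.cleanerChoreSwitch(false).get(); // returns the prior state
          System.out.println("cleaner chore was " + (previous ? "on" : "off"));
        }
      }
    }
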
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index af1de927e7e..c91b8cf3225 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -179,7 +179,7 @@ public class AsyncConnectionImpl implements AsyncConnection {
   }
 
   /**
-   * If choreService has not been created yet, create the ChoreService. n
+   * If choreService has not been created yet, create the ChoreService.
    */
   synchronized ChoreService getChoreService() {
     if (isClosed()) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 46abc3753cc..7a4ed3f5b36 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -198,7 +198,7 @@ class AsyncProcess {
    * The submitted task may be not accomplished at all if there are too many running tasks or other
    * limits.
    * @param <CResult> The class to cast the result
-   * @param task      The setting and data n
+   * @param task      The setting and data
    */
   public <CResult> AsyncRequestFuture submit(AsyncProcessTask<CResult> task)
     throws InterruptedIOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 219e63a6ffc..a63be4d1f41 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -57,8 +57,8 @@ public interface ClusterConnection extends Connection {
   /**
    * Use this api to check if the table has been created with the specified number of splitkeys
    * which was used while creating the given table. Note : If this api is used after a table's
-   * region gets splitted, the api may return false. n * tableName n * splitKeys used while creating
-   * table n * if a remote or network exception occurs
+   * region gets split, the api may return false.
+   * @param tableName the table to check
+   * @param splitKeys the split keys used while creating the given table
+   * @throws IOException if a remote or network exception occurs
    */
   boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException;
 
@@ -261,7 +261,7 @@ public interface ClusterConnection extends Connection {
    * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. This
    * RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be
    * intercepted with the configured {@link RetryingCallerInterceptor}
-   * @param conf configuration n
+   * @param conf configuration
    */
   RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf);
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
index 6a092a221fd..369b2be8ecd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
@@ -133,7 +133,7 @@ public interface ColumnFamilyDescriptor {
   int getMinVersions();
 
   /**
-   * Get the mob compact partition policy for this family n
+   * Get the mob compact partition policy for this family
    */
   MobCompactPartitionPolicy getMobCompactPartitionPolicy();
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 87d92b7d904..d3e8242d09a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -866,8 +866,8 @@ public class ColumnFamilyDescriptorBuilder {
 
     /**
      * Set whether the tags should be compressed along with DataBlockEncoding. When no
-     * DataBlockEncoding is been used, this is having no effect. n * @return this (for chained
-     * invocation)
+     * DataBlockEncoding is in use, this has no effect.
+     * @return this (for chained invocation)
      */
     public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) {
       return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags));
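
A minimal sketch of setCompressTags through the public builder, paired with a block
encoding since the javadoc notes the flag has no effect without one; the family name is
an assumption:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompressTagsSketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF) // tag compression rides on encoding
          .setCompressTags(true)
          .build();
        System.out.println(cfd.isCompressTags());
      }
    }
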
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 4c828036fd0..cef1a9b787c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -633,7 +633,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * If choreService has not been created yet, create the ChoreService. n
+   * If choreService has not been created yet, create the ChoreService.
    */
   synchronized ChoreService getChoreService() {
     if (choreService == null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 4b500b09d6e..78752c9ec4a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -95,7 +95,7 @@ public class Delete extends Mutation {
    * <p>
    * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you
    * must specify each timestamp individually.
-   * @param row We make a local copy of this passed in row. nn
+   * @param row We make a local copy of this passed in row.
    */
   public Delete(final byte[] row, final int rowOffset, final int rowLength) {
     this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@@ -139,8 +139,9 @@ public class Delete extends Mutation {
   /**
    * Advanced use only. Add an existing delete marker to this Delete object.
    * @param kv An existing KeyValue of type "delete".
-   * @return this for invocation chaining n * @deprecated As of release 2.0.0, this will be removed
-   *         in HBase 3.0.0. Use {@link #add(Cell)} instead
+   * @return this for invocation chaining
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)}
+   *             instead
    */
   @SuppressWarnings("unchecked")
   @Deprecated
@@ -151,7 +152,7 @@ public class Delete extends Mutation {
   /**
    * Add an existing delete marker to this Delete object.
    * @param cell An existing cell of type "delete".
-   * @return this for invocation chaining n
+   * @return this for invocation chaining
    */
   @Override
   public Delete add(Cell cell) throws IOException {
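
A minimal sketch of the Delete semantics described above: the constructor timestamp only
governs a whole-row delete, so per-column deletes carry their own; names are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
          Table table = conn.getTable(TableName.valueOf("t1"))) {
          Delete d = new Delete(Bytes.toBytes("row-1"));
          // Delete all versions of cf:q whose timestamp is <= the one given here.
          d.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q"), 1234567890L);
          table.delete(d);
        }
      }
    }
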
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 1128015d459..9d692ceaa50 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -87,7 +87,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Copy-constructor n
+   * Copy-constructor
    */
   public Get(Get get) {
     this(get.getRow());
@@ -126,7 +126,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Create a Get operation for the specified row. nnn
+   * Create a Get operation for the specified row.
    */
   public Get(byte[] row, int rowOffset, int rowLength) {
     Mutation.checkRow(row, rowOffset, rowLength);
@@ -134,7 +134,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Create a Get operation for the specified row. n
+   * Create a Get operation for the specified row.
    */
   public Get(ByteBuffer row) {
     Mutation.checkRow(row);
@@ -208,7 +208,8 @@ public class Get extends Query implements Row {
   /**
    * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp).
    * @param minStamp minimum timestamp value, inclusive
-   * @param maxStamp maximum timestamp value, exclusive n * @return this for invocation chaining
+   * @param maxStamp maximum timestamp value, exclusive
+   * @return this for invocation chaining
    */
   public Get setTimeRange(long minStamp, long maxStamp) throws IOException {
     tr = new TimeRange(minStamp, maxStamp);
@@ -351,7 +352,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Method for retrieving the get's row n
+   * Method for retrieving the get's row
    */
   @Override
   public byte[] getRow() {
@@ -383,7 +384,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Method for retrieving the get's TimeRange n
+   * Method for retrieving the get's TimeRange
    */
   public TimeRange getTimeRange() {
     return this.tr;
@@ -414,7 +415,7 @@ public class Get extends Query implements Row {
   }
 
   /**
-   * Method for retrieving the get's familyMap n
+   * Method for retrieving the get's familyMap
    */
   public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
     return this.familyMap;
@@ -422,7 +423,7 @@ public class Get extends Query implements Row {
 
   /**
    * Compile the table and column family (i.e. schema) information into a String. Useful for parsing
-   * and aggregation by debugging, logging, and administration tools. n
+   * and aggregation by debugging, logging, and administration tools.
    */
   @Override
   public Map<String, Object> getFingerprint() {
@@ -439,7 +440,7 @@ public class Get extends Query implements Row {
    * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
    * Map along with the fingerprinted information. Useful for debugging, logging, and administration
    * tools.
-   * @param maxCols a limit on the number of columns output prior to truncation n
+   * @param maxCols a limit on the number of columns output prior to truncation
    */
   @Override
   public Map<String, Object> toMap(int maxCols) {
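
A minimal sketch of a Get restricted by the half-open [minStamp, maxStamp) time range
documented above; names are illustrative assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetTimeRangeSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
          Table table = conn.getTable(TableName.valueOf("t1"))) {
          Get get = new Get(Bytes.toBytes("row-1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"))
            .setTimeRange(0L, 1234567890L); // versions with ts in [0, 1234567890)
          Result result = table.get(get);
          System.out.println(result);
        }
      }
    }
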
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index e3392d7dfce..5d140818730 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1193,7 +1193,7 @@ public class HBaseAdmin implements Admin {
   }
 
   /**
-   * n * @return List of {@link HRegionInfo}.
+   * @return List of {@link HRegionInfo}.
    * @throws IOException if a remote or network exception occurs
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link #getRegions(ServerName)}.
@@ -1417,7 +1417,7 @@ public class HBaseAdmin implements Admin {
    * @param regionName   region to compact
    * @param columnFamily column family within a table or region
    * @param major        True if we are to do a major compaction.
-   * @throws IOException if a remote or network exception occurs n
+   * @throws IOException if a remote or network exception occurs
    */
   private void compactRegion(final byte[] regionName, final byte[] columnFamily,
     final boolean major) throws IOException {
@@ -2369,7 +2369,7 @@ public class HBaseAdmin implements Admin {
   }
 
   /**
-   * n * @return List of {@link HRegionInfo}.
+   * @return List of {@link HRegionInfo}.
    * @throws IOException if a remote or network exception occurs
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
    *             {@link #getRegions(TableName)}.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index ddfe7ca439a..c4c95d73c2b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -129,8 +129,8 @@ public class HTableMultiplexer {
 
   /**
    * The put request will be buffered by its corresponding buffer queue. Return false if the queue
-   * is already full. nn * @return true if the request can be accepted by its corresponding buffer
-   * queue.
+   * is already full.
+   * @return true if the request can be accepted by its corresponding buffer queue.
    */
   public boolean put(TableName tableName, final Put put) {
     return put(tableName, put, this.maxAttempts);
@@ -138,7 +138,8 @@ public class HTableMultiplexer {
 
   /**
    * The puts request will be buffered by their corresponding buffer queue. Return the list of puts
-   * which could not be queued. nn * @return the list of puts which could not be queued
+   * which could not be queued.
+   * @return the list of puts which could not be queued
    */
   public List<Put> put(TableName tableName, final List<Put> puts) {
     if (puts == null) return null;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
index 5f758c7a5e1..6564601b22c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class ImmutableHColumnDescriptor extends HColumnDescriptor {
   /*
-   * Create an unmodifyable copy of an HColumnDescriptor n
+   * Create an unmodifiable copy of an HColumnDescriptor
    */
   ImmutableHColumnDescriptor(final HColumnDescriptor desc) {
     super(desc, false);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java
index f95dbeb001e..952ac4f7719 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 public class ImmutableHRegionInfo extends HRegionInfo {
 
   /*
-   * Creates an immutable copy of an HRegionInfo. n
+   * Creates an immutable copy of an HRegionInfo.
    */
   public ImmutableHRegionInfo(RegionInfo other) {
     super(other);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
index 2cce334e359..9200c85daa1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
@@ -41,7 +41,7 @@ public class ImmutableHTableDescriptor extends HTableDescriptor {
   }
 
   /*
-   * Create an unmodifyable copy of an HTableDescriptor n
+   * Create an unmodifiable copy of an HTableDescriptor
    */
   public ImmutableHTableDescriptor(final HTableDescriptor desc) {
     super(desc, false);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
index 86c0b65cc43..1f3e664826e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
@@ -92,7 +92,8 @@ public class Increment extends Mutation {
 
   /**
    * Add the specified KeyValue to this operation.
-   * @param cell individual Cell n * @throws java.io.IOException e
+   * @param cell individual Cell
+   * @throws java.io.IOException e
    */
   @Override
   public Increment add(Cell cell) throws IOException {
@@ -121,7 +122,7 @@ public class Increment extends Mutation {
   }
 
   /**
-   * Gets the TimeRange used for this increment. n
+   * Gets the TimeRange used for this increment.
    */
   public TimeRange getTimeRange() {
     return this.tr;
@@ -139,7 +140,7 @@ public class Increment extends Mutation {
    * This range is used as [minStamp, maxStamp).
    * @param minStamp minimum timestamp value, inclusive
    * @param maxStamp maximum timestamp value, exclusive
-   * @throws IOException if invalid time range n
+   * @throws IOException if invalid time range
    */
   public Increment setTimeRange(long minStamp, long maxStamp) throws IOException {
     tr = new TimeRange(minStamp, maxStamp);
@@ -209,8 +210,7 @@ public class Increment extends Mutation {
   }
 
   /**
-   * n
-   */
+   * Produces a string representation of this Increment.
+   */
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
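
A minimal sketch of the Increment API whose javadoc is cleaned up above; names are
illustrative assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
          Table table = conn.getTable(TableName.valueOf("t1"))) {
          Increment inc = new Increment(Bytes.toBytes("row-1"));
          inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L); // bump counter by 1
          Result result = table.increment(inc);
          byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("hits"));
          System.out.println(Bytes.toLong(value));
        }
      }
    }
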
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
index 7a919f5ad5f..e7c600ee50b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
@@ -107,7 +107,7 @@ abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
    * configured to make this rpc call, use getRpcController(). We are trying to contain
    * rpcController references so we don't pollute codebase with protobuf references; keep the
    * protobuf references contained and only present in a few classes rather than all about the code
-   * base. n
+   * base.
    */
   protected abstract V rpcCall() throws Exception;
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
index dd19588d307..e0b3240b023 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
@@ -186,9 +186,7 @@ public class MetaCache {
     }
   }
 
-  /**
-   * n * @return Map of cached locations for passed <code>tableName</code>
-   */
+  /** Returns Map of cached locations for passed <code>tableName</code> */
   private ConcurrentNavigableMap<byte[], RegionLocations>
     getTableLocations(final TableName tableName) {
     // find the map of cached locations for this table
@@ -287,7 +285,7 @@ public class MetaCache {
 
   /**
    * Delete a cached location, no matter what it is. Called when we were told to not use cache.
-   * @param tableName tableName n
+   * @param tableName tableName
    */
   public void clearCache(final TableName tableName, final byte[] row) {
     ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index fbb76ea4f65..a9382f3a9be 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -179,7 +179,7 @@ class MutableRegionInfo implements RegionInfo {
   }
 
   /**
-   * Get current table name of the region n
+   * Get current table name of the region
    */
   @Override
   public TableName getTable() {
@@ -231,7 +231,7 @@ class MutableRegionInfo implements RegionInfo {
 
   /**
    * Change the split status flag.
-   * @param split set split status n
+   * @param split set split status
    */
   public MutableRegionInfo setSplit(boolean split) {
     this.split = split;
@@ -252,7 +252,7 @@ class MutableRegionInfo implements RegionInfo {
   /**
    * The parent of a region split is offline while split daughters hold references to the parent.
    * Offlined regions are closed.
-   * @param offLine Set online/offline status. n
+   * @param offLine Set online/offline status.
    */
   public MutableRegionInfo setOffline(boolean offLine) {
     this.offLine = offLine;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index ac1e9fce079..6f946c029c1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -201,7 +201,7 @@ public abstract class Mutation extends OperationWithAttributes
    * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
    * Map along with the fingerprinted information. Useful for debugging, logging, and administration
    * tools.
-   * @param maxCols a limit on the number of columns output prior to truncation n
+   * @param maxCols a limit on the number of columns output prior to truncation
    */
   @Override
   public Map<String, Object> toMap(int maxCols) {
@@ -264,7 +264,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Set the durability for this mutation n
+   * Set the durability for this mutation
    */
   public Mutation setDurability(Durability d) {
     this.durability = d;
@@ -277,7 +277,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Method for retrieving the put's familyMap n
+   * Method for retrieving the put's familyMap
    */
   public NavigableMap<byte[], List<Cell>> getFamilyCellMap() {
     return this.familyMap;
@@ -305,7 +305,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Method for retrieving the delete's row n
+   * Method for retrieving the delete's row
    */
   @Override
   public byte[] getRow() {
@@ -323,8 +323,9 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Method for retrieving the timestamp n * @deprecated As of release 2.0.0, this will be removed
-   * in HBase 3.0.0. Use {@link #getTimestamp()} instead
+   * Method for retrieving the timestamp
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
+   *             {@link #getTimestamp()} instead
    */
   @Deprecated
   public long getTimeStamp() {
@@ -332,7 +333,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Method for retrieving the timestamp. n
+   * Method for retrieving the timestamp.
    */
   public long getTimestamp() {
     return this.ts;
@@ -368,7 +369,7 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Sets the visibility expression associated with cells in this Mutation. n
+   * Sets the visibility expression associated with cells in this Mutation.
    */
   public Mutation setCellVisibility(CellVisibility expression) {
     this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@@ -384,8 +385,8 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
-   * protocol buffer CellVisibility
+   * Create a protocol buffer CellVisibility based on a client CellVisibility.
+   * @return a protocol buffer CellVisibility
    */
   static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
     ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@@ -394,8 +395,8 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
-   * client CellVisibility
+   * Convert a protocol buffer CellVisibility to a client CellVisibility
+   * @return the converted client CellVisibility
    */
   private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
     if (proto == null) return null;
@@ -403,8 +404,8 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
-   * converted client CellVisibility n
+   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+   * @return the converted client CellVisibility
    */
   private static CellVisibility toCellVisibility(byte[] protoBytes)
     throws DeserializationException {
@@ -511,7 +512,7 @@ public abstract class Mutation extends OperationWithAttributes
 
   /**
    * Set the TTL desired for the result of the mutation, in milliseconds.
-   * @param ttl the TTL desired for the result of the mutation, in milliseconds n
+   * @param ttl the TTL desired for the result of the mutation, in milliseconds
    */
   public Mutation setTTL(long ttl) {
     setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl));
@@ -688,8 +689,9 @@ public abstract class Mutation extends OperationWithAttributes
   }
 
   /**
-   * @param row Row to check nn * @throws IllegalArgumentException Thrown if <code>row</code> is
-   *            empty or null or &gt; {@link HConstants#MAX_ROW_LENGTH}
+   * @param row Row to check
+   * @throws IllegalArgumentException Thrown if <code>row</code> is empty or null or &gt;
+   *                                  {@link HConstants#MAX_ROW_LENGTH}
    * @return <code>row</code>
    */
   static byte[] checkRow(final byte[] row, final int offset, final int length) {
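
For the setCellVisibility and setTTL attributes documented above, a minimal sketch; it
assumes the visibility-labels coprocessor is enabled and the labels already exist:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.security.visibility.CellVisibility;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VisibilityPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
          Table table = conn.getTable(TableName.valueOf("t1"))) {
          Put put = new Put(Bytes.toBytes("row-1"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          put.setCellVisibility(new CellVisibility("secret|admin")); // label expression
          put.setTTL(86400000L); // the written cells expire after one day
          table.put(put);
        }
      }
    }
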
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
index a517f0bb43a..2cad5ef7325 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
@@ -94,7 +94,7 @@ public abstract class Operation {
   /**
    * Produces a string representation of this Operation. It defaults to a JSON representation, but
    * falls back to a string representation of the fingerprint and details in the case of a JSON
-   * encoding failure. n
+   * encoding failure.
    */
   @Override
   public String toString() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
index e34c9d6eacb..33c1d853e1a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
@@ -106,7 +106,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri
    * This method allows you to set an identifier on an operation. The original motivation for this
    * was to allow the identifier to be used in slow query logging, but this could obviously be
    * useful in other places. One use of this could be to put a class.method identifier in here to
-   * see where the slow query is coming from. n * id to set for the scan
+   * see where the slow query is coming from. id to set for the scan
    */
   public OperationWithAttributes setId(String id) {
     setAttribute(ID_ATRIBUTE, Bytes.toBytes(id));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index edca3c7b6b6..4bfc43fc32d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
@@ -132,7 +132,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
   /**
    * Handles failures encountered when communicating with a server. Updates the FailureInfo in
    * repeatedFailuresMap to reflect the failure. Throws RepeatedConnectException if the client is in
-   * Fast fail mode. nn * - the throwable to be handled. n
+   * Fast fail mode.
+   * @param serverName the server we failed to communicate with
+   * @param t          the throwable to be handled
    */
   protected void handleFailureToServer(ServerName serverName, Throwable t) {
     if (serverName == null || t == null) {
@@ -201,7 +201,8 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
   /**
    * Checks to see if we are in the Fast fail mode for requests to the server. If a client is unable
    * to contact a server for more than fastFailThresholdMilliSec the client will get into fast fail
-   * mode. n * @return true if the client is in fast fail mode for the server.
+   * mode.
+   * @return true if the client is in fast fail mode for the server.
    */
   private boolean inFastFailMode(ServerName server) {
     FailureInfo fInfo = repeatedFailuresMap.get(server);
@@ -225,7 +226,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
    * Check to see if the client should try to connect to the server, in spite of knowing that it is
    * in the fast fail mode. The idea here is that we want just one client thread to be actively
    * trying to reconnect, while all the other threads trying to reach the server will short circuit.
-   * n * @return true if the client should try to connect to the server.
+   * @return true if the client should try to connect to the server.
    */
   protected boolean shouldRetryInspiteOfFastFail(FailureInfo fInfo) {
     // We believe that the server is down, But, we want to have just one
@@ -246,7 +247,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
   }
 
   /**
-   * This function updates the Failure info for a particular server after the attempt to nnnn
+   * This function updates the failure info for a particular server after an attempt to contact it.
    */
   private void updateFailureInfoForServer(ServerName server, FailureInfo fInfo, boolean didTry,
     boolean couldNotCommunicate, boolean retryDespiteFastFailMode) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index df26f091ff7..d2701f31628 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -60,7 +60,7 @@ public class Put extends Mutation implements HeapSize {
   }
 
   /**
-   * We make a copy of the passed in row key to keep local. nnn
+   * We make a copy of the passed in row key to keep local.
    */
   public Put(byte[] rowArray, int rowOffset, int rowLength) {
     this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@@ -88,7 +88,7 @@ public class Put extends Mutation implements HeapSize {
   }
 
   /**
-   * We make a copy of the passed in row key to keep local. nnnn
+   * We make a copy of the passed in row key to keep local.
    */
   public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) {
     checkRow(rowArray, rowOffset, rowLength);
@@ -155,7 +155,7 @@ public class Put extends Mutation implements HeapSize {
    * Add the specified column and value to this Put operation.
    * @param family    family name
    * @param qualifier column qualifier
-   * @param value     column value n
+   * @param value     column value
    */
   public Put addColumn(byte[] family, byte[] qualifier, byte[] value) {
     return addColumn(family, qualifier, this.ts, value);
@@ -178,7 +178,7 @@ public class Put extends Mutation implements HeapSize {
    * @param family    family name
    * @param qualifier column qualifier
    * @param ts        version timestamp
-   * @param value     column value n
+   * @param value     column value
    */
   public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) {
     if (ts < 0) {
@@ -222,7 +222,7 @@ public class Put extends Mutation implements HeapSize {
    * @param family    family name
    * @param qualifier column qualifier
    * @param ts        version timestamp
-   * @param value     column value n
+   * @param value     column value
    */
   public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
     if (ts < 0) {
@@ -255,7 +255,8 @@ public class Put extends Mutation implements HeapSize {
   /**
    * Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is
    * immutable and its backing array will not be modified for the duration of this Put.
-   * @param cell individual cell n * @throws java.io.IOException e
+   * @param cell individual cell
+   * @throws java.io.IOException e
    */
   @Override
   public Put add(Cell cell) throws IOException {
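
A minimal sketch of the offset/length Put constructor documented above, which copies just
the row key out of a larger buffer; names are illustrative assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class OffsetPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
          Table table = conn.getTable(TableName.valueOf("t1"))) {
          byte[] buffer = Bytes.toBytes("##row-1##");
          Put put = new Put(buffer, 2, buffer.length - 4); // row key is "row-1"
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), 1234567890L, Bytes.toBytes("v"));
          table.put(put);
        }
      }
    }
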
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index a954d84db0e..5f129ef9cff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -47,9 +47,6 @@ public abstract class Query extends OperationWithAttributes {
   protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
   protected Boolean loadColumnFamiliesOnDemand = null;
 
-  /**
-   * n
-   */
   public Filter getFilter() {
     return filter;
   }
@@ -67,7 +64,7 @@ public abstract class Query extends OperationWithAttributes {
   }
 
   /**
-   * Sets the authorizations to be used by this Query n
+   * Sets the authorizations to be used by this Query
    */
   public Query setAuthorizations(Authorizations authorizations) {
     this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@@ -133,7 +130,7 @@ public abstract class Query extends OperationWithAttributes {
    * Specify region replica id where Query will fetch data from. Use this together with
    * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a
    * specific replicaId. <br>
-   * <b> Expert: </b>This is an advanced API exposed. Only use it if you know what you are doing n
+   * <b> Expert: </b>This is an advanced API exposed. Only use it if you know what you are doing
    */
   public Query setReplicaId(int Id) {
     this.targetReplicaId = Id;
@@ -209,7 +206,7 @@ public abstract class Query extends OperationWithAttributes {
    * Column Family time ranges take precedence over the global time range.
    * @param cf       the column family for which you want to restrict
    * @param minStamp minimum timestamp value, inclusive
-   * @param maxStamp maximum timestamp value, exclusive n
+   * @param maxStamp maximum timestamp value, exclusive
    */
 
   public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
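
A minimal sketch of the per-family time range documented above, which takes precedence
over the global range for that family; the family name is an assumption:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CfTimeRangeSketch {
      public static void main(String[] args) throws Exception {
        Scan scan = new Scan();
        scan.setTimeRange(0L, 2000000000000L); // global [min, max) range
        // Narrower window that wins for family 'cf' only.
        scan.setColumnFamilyTimeRange(Bytes.toBytes("cf"), 1500000000000L, 2000000000000L);
        // Hand the scan to Table.getScanner(scan) as usual.
      }
    }
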
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 8b2ed345306..fea19680034 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -184,7 +184,7 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<
    * Run RPC call.
    * @param rpcController PayloadCarryingRpcController is a mouthful but it at a minimum is a facade
    *                      on protobuf so we don't have to put protobuf everywhere; we can keep it
-   *                      behind this class. n
+   *                      behind this class.
    */
   protected abstract T call(HBaseRpcController rpcController) throws Exception;
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
index 58163a2d74a..3f353b5799d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
@@ -59,7 +59,8 @@ public class RegionInfoDisplay {
   }
 
   /**
-   * Get the start key for display. Optionally hide the real start key. nn * @return the startkey
+   * Get the start key for display. Optionally hide the real start key.
+   * @return the startkey
    */
   public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) {
     boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
@@ -68,15 +69,16 @@ public class RegionInfoDisplay {
   }
 
   /**
-   * Get the region name for display. Optionally hide the start key. nn * @return region name as
-   * String
+   * Get the region name for display. Optionally hide the start key.
+   * @return region name as String
    */
   public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuration conf) {
     return Bytes.toStringBinary(getRegionNameForDisplay(ri, conf));
   }
 
   /**
-   * Get the region name for display. Optionally hide the start key. nn * @return region name bytes
+   * Get the region name for display. Optionally hide the start key.
+   * @return region name bytes
    */
   public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) {
     boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
index 1d6708b49d1..df1bfd61ca6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
@@ -84,7 +84,7 @@ public class RegionReplicaUtil {
   }
 
   /**
-   * Removes the non-default replicas from the passed regions collection n
+   * Removes the non-default replicas from the passed regions collection
    */
   public static void removeNonDefaultRegions(Collection<RegionInfo> regions) {
     Iterator<RegionInfo> iterator = regions.iterator();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index abe4058b99d..a97c6da80ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -132,7 +132,7 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
    * configured to make this rpc call, use getRpcController(). We are trying to contain
    * rpcController references so we don't pollute codebase with protobuf references; keep the
    * protobuf references contained and only present in a few classes rather than all about the code
-   * base. n
+   * base.
    */
   protected abstract T rpcCall() throws Exception;
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index ced6b5ea442..bcaf7721c0a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -186,7 +186,7 @@ public class Result implements CellScannable, CellScanner {
 
   /**
    * Method for retrieving the row key that corresponds to the row from which this Result was
-   * created. n
+   * created.
    */
   public byte[] getRow() {
     if (this.row == null) {
@@ -227,8 +227,9 @@ public class Result implements CellScannable, CellScanner {
    * or Get) only requested 1 version the list will contain at most 1 entry. If the column did not
    * exist in the result set (either the column does not exist or the column was not selected in the
    * query) the list will be empty. Also see getColumnLatest which returns just a Cell
-   * @param family the family n * @return a list of Cells for this column or empty list if the
-   *               column did not exist in the result set
+   * @param family    the family
+   * @param qualifier the qualifier
+   * @return a list of Cells for this column or empty list if the column did not exist in the result
+   *         set
    */
   public List<Cell> getColumnCells(byte[] family, byte[] qualifier) {
     List<Cell> result = new ArrayList<>();
@@ -324,7 +325,7 @@ public class Result implements CellScannable, CellScanner {
   }
 
   /**
-   * The Cell for the most recent timestamp for a given column. nn *
+   * The Cell for the most recent timestamp for a given column.
    * @return the Cell for the column, or null if no value exists in the row or none have been
    *         selected in the query (Get/Scan)
    */
@@ -677,8 +678,7 @@ public class Result implements CellScannable, CellScanner {
   }
 
   /**
-   * n
-   */
+   * Produces a string representation of this Result.
+   */
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
@@ -800,7 +800,8 @@ public class Result implements CellScannable, CellScanner {
   }
 
   /**
-   * Get total size of raw cells n * @return Total size.
+   * Get total size of raw cells
+   * @return Total size.
    */
   public static long getTotalSizeOfCells(Result result) {
     long size = 0;
@@ -816,7 +817,7 @@ public class Result implements CellScannable, CellScanner {
   /**
    * Copy another Result into this one. Needed for the old Mapred framework
    * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT (which is supposed
-   *                                       to be immutable). n
+   *                                       to be immutable).
    */
   public void copyFrom(Result other) {
     checkReadonly();
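
A minimal sketch of getColumnCells, which per the javadoc above returns an empty list
(never null) when the column is absent from the result set; names are illustrative:

    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ResultCellsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
          Table table = conn.getTable(TableName.valueOf("t1"))) {
          Result result = table.get(new Get(Bytes.toBytes("row-1")).readVersions(3));
          List<Cell> cells = result.getColumnCells(Bytes.toBytes("cf"), Bytes.toBytes("q"));
          for (Cell cell : cells) { // newest version first
            System.out.println(cell.getTimestamp() + " -> "
              + Bytes.toStringBinary(CellUtil.cloneValue(cell)));
          }
        }
      }
    }
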
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java
index cb3b2fd3cd6..719b6b2aae7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java
@@ -46,20 +46,20 @@ abstract class RetryingCallerInterceptor {
   public abstract RetryingCallerInterceptorContext createEmptyContext();
 
   /**
-   * Call this function in case we caught a failure during retries. n * : The context object that we
-   * obtained previously. n * : The exception that we caught in this particular try n
+   * Call this function in case we caught a failure during retries.
+   * @param context the context object that we obtained previously
+   * @param t       the exception that we caught in this particular try
    */
   public abstract void handleFailure(RetryingCallerInterceptorContext context, Throwable t)
     throws IOException;
 
   /**
-   * Call this function alongside the actual call done on the callable. nn
+   * Call this function alongside the actual call done on the callable.
    */
   public abstract void intercept(
     RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) throws IOException;
 
   /**
-   * Call this function to update at the end of the retry. This is not necessary to happen. n
+   * Call this function to update at the end of the retry. This is not necessary to happen.
    */
   public abstract void updateFailureInfo(RetryingCallerInterceptorContext context);
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java
index b810de46c44..177777624d6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorContext.java
@@ -38,17 +38,17 @@ abstract class RetryingCallerInterceptorContext {
 
   /**
    * This prepares the context object by populating it with information specific to the
-   * implementation of the {@link RetryingCallerInterceptor} along with which this will be used. n *
-   * : The {@link RetryingCallable} that contains the information about the call that is being made.
+   * implementation of the {@link RetryingCallerInterceptor} along with which this will be used.
+   * @param callable the {@link RetryingCallable} that contains the information about the call that
+   *                 is being made
    * @return A new {@link RetryingCallerInterceptorContext} object that can be used for use in the
    *         current retrying call
    */
   public abstract RetryingCallerInterceptorContext prepare(RetryingCallable<?> callable);
 
   /**
-   * Telescopic extension that takes which of the many retries we are currently in. n * : The
-   * {@link RetryingCallable} that contains the information about the call that is being made. n * :
-   * The retry number that we are currently in.
+   * Telescopic extension that additionally takes the {@link RetryingCallable} that contains the
+   * information about the call that is being made, and the retry number that we are currently
+   * in.
    * @return A new context object that can be used for use in the current retrying call
    */
   public abstract RetryingCallerInterceptorContext prepare(RetryingCallable<?> callable, int tries);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
index b5857fb1364..7be5ecb01cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
@@ -39,8 +39,8 @@ public class RowMutations implements Row {
 
   /**
    * Create a {@link RowMutations} with the specified mutations.
-   * @param mutations the mutations to send n * @throws IOException if any row in mutations is
-   *                  different to another
+   * @param mutations the mutations to send
+   * @throws IOException if any row in mutations is different to another
    */
   public static RowMutations of(List<? extends Mutation> mutations) throws IOException {
     if (CollectionUtils.isEmpty(mutations)) {
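
For reference, a minimal sketch of the factory method documented above (assumes a live Table
named table; not part of this patch):

    // Atomically apply a Put and a Delete to the same row.
    byte[] row = Bytes.toBytes("row1");
    Put put = new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("v"));
    Delete del = new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("b"));
    RowMutations rm = RowMutations.of(Arrays.asList(put, del)); // throws IOException if rows differ
    table.mutateRow(rm);
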
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 44ef44b3b67..cdc467d9ca9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -326,7 +326,7 @@ public class Scan extends Query {
    * Get all columns from the specified family.
    * <p>
    * Overrides previous calls to addColumn for this family.
-   * @param family family name n
+   * @param family family name
    */
   public Scan addFamily(byte[] family) {
     familyMap.remove(family);
@@ -339,7 +339,7 @@ public class Scan extends Query {
    * <p>
    * Overrides previous calls to addFamily for this family.
    * @param family    family name
-   * @param qualifier column qualifier n
+   * @param qualifier column qualifier
    */
   public Scan addColumn(byte[] family, byte[] qualifier) {
     NavigableSet<byte[]> set = familyMap.get(family);
@@ -361,7 +361,7 @@ public class Scan extends Query {
    * @param minStamp minimum timestamp value, inclusive
    * @param maxStamp maximum timestamp value, exclusive
    * @see #setMaxVersions()
-   * @see #setMaxVersions(int) n
+   * @see #setMaxVersions(int)
    */
   public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
     tr = new TimeRange(minStamp, maxStamp);
@@ -374,8 +374,9 @@ public class Scan extends Query {
    * number of versions beyond the default.
    * @param timestamp version timestamp
    * @see #setMaxVersions()
-   * @see #setMaxVersions(int) n * @deprecated As of release 2.0.0, this will be removed in HBase
-   *      3.0.0. Use {@link #setTimestamp(long)} instead
+   * @see #setMaxVersions(int)
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
+   *             {@link #setTimestamp(long)} instead
    */
   @Deprecated
   public Scan setTimeStamp(long timestamp) throws IOException {
@@ -388,7 +389,7 @@ public class Scan extends Query {
    * number of versions beyond the default.
    * @param timestamp version timestamp
    * @see #setMaxVersions()
-   * @see #setMaxVersions(int) n
+   * @see #setMaxVersions(int)
    */
   public Scan setTimestamp(long timestamp) {
     try {
@@ -417,9 +418,9 @@ public class Scan extends Query {
    * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
    * unexpected or even undefined.
    * </p>
-   * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if
-   *                 startRow does not meet criteria for a row key (when length exceeds
-   *                 {@link HConstants#MAX_ROW_LENGTH})
+   * @param startRow row to start scanner at or after
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStartRow(byte[])}
    *             instead. This method may change the inclusiveness of the stop row to keep compatible
    *             with the old behavior.
@@ -441,9 +442,9 @@ public class Scan extends Query {
    * <p>
    * If the specified row does not exist, the Scanner will start from the next closest row after the
    * specified row.
-   * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if
-   *                 startRow does not meet criteria for a row key (when length exceeds
-   *                 {@link HConstants#MAX_ROW_LENGTH})
+   * @param startRow row to start scanner at or after
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
   public Scan withStartRow(byte[] startRow) {
     return withStartRow(startRow, true);
@@ -460,9 +461,9 @@ public class Scan extends Query {
    * unexpected or even undefined.
    * </p>
    * @param startRow  row to start scanner at or after
-   * @param inclusive whether we should include the start row when scan n * @throws
-   *                  IllegalArgumentException if startRow does not meet criteria for a row key
-   *                  (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
+   * @param inclusive whether we should include the start row when scanning
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
   public Scan withStartRow(byte[] startRow, boolean inclusive) {
     if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
@@ -483,9 +484,9 @@ public class Scan extends Query {
    * {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
    * unexpected or even undefined.
    * </p>
-   * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does
-   *                not meet criteria for a row key (when length exceeds
-   *                {@link HConstants#MAX_ROW_LENGTH})
+   * @param stopRow row to end at (exclusive)
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStopRow(byte[])} instead.
    *             This method may change the inclusiveness of the stop row to keep compatible with the
    *             old behavior.
@@ -510,9 +511,9 @@ public class Scan extends Query {
    * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
    * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
    * </p>
-   * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does
-   *                not meet criteria for a row key (when length exceeds
-   *                {@link HConstants#MAX_ROW_LENGTH})
+   * @param stopRow row to end at (exclusive)
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
   public Scan withStopRow(byte[] stopRow) {
     return withStopRow(stopRow, false);
@@ -529,9 +530,9 @@ public class Scan extends Query {
    * unexpected or even undefined.
    * </p>
    * @param stopRow   row to end at
-   * @param inclusive whether we should include the stop row when scan n * @throws
-   *                  IllegalArgumentException if stopRow does not meet criteria for a row key (when
-   *                  length exceeds {@link HConstants#MAX_ROW_LENGTH})
+   * @param inclusive whether we should include the stop row when scanning
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *                                  exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
   public Scan withStopRow(byte[] stopRow, boolean inclusive) {
     if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
@@ -559,11 +560,10 @@ public class Scan extends Query {
    * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
    * a combination will yield unexpected and even undefined results.
    * </p>
-   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) n
-   *                  * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method
-   *                  is considered to be confusing as it does not use a {@link Filter} but uses
-   *                  setting the startRow and stopRow instead. Use
-   *                  {@link #setStartStopRowForPrefixScan(byte[])} instead.
+   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
+   * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method is considered
+   *             confusing as it does not use a {@link Filter} but instead sets the startRow and
+   *             stopRow. Use {@link #setStartStopRowForPrefixScan(byte[])} instead.
    */
   public Scan setRowPrefixFilter(byte[] rowPrefix) {
     return setStartStopRowForPrefixScan(rowPrefix);
@@ -585,7 +585,7 @@ public class Scan extends Query {
    * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
    * a combination will yield unexpected and even undefined results.
    * </p>
-   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) n
+   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
    */
   public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
     if (rowPrefix == null) {
@@ -599,9 +599,9 @@ public class Scan extends Query {
   }
 
   /**
-   * Get all available versions. n * @deprecated since 2.0.0 and will be removed in 3.0.0. It is
-   * easy to misunderstand with column family's max versions, so use {@link #readAllVersions()}
-   * instead.
+   * Get all available versions.
+   * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to confuse with the column
+   *             family's max versions, so use {@link #readAllVersions()} instead.
    * @see #readAllVersions()
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
    */
@@ -612,9 +612,9 @@ public class Scan extends Query {
 
   /**
    * Get up to the specified number of versions of each column.
-   * @param maxVersions maximum versions for each column n * @deprecated since 2.0.0 and will be
-   *                    removed in 3.0.0. It is easy to misunderstand with column family's max
-   *                    versions, so use {@link #readVersions(int)} instead.
+   * @param maxVersions maximum versions for each column
+   * @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to confuse with the column
+   *             family's max versions, so use {@link #readVersions(int)} instead.
    * @see #readVersions(int)
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
    */
@@ -624,7 +624,7 @@ public class Scan extends Query {
   }
 
   /**
-   * Get all available versions. n
+   * Get all available versions.
    */
   public Scan readAllVersions() {
     this.maxVersions = Integer.MAX_VALUE;
@@ -633,7 +633,7 @@ public class Scan extends Query {
 
   /**
    * Get up to the specified number of versions of each column.
-   * @param versions specified number of versions for each column n
+   * @param versions specified number of versions for each column
    */
   public Scan readVersions(int versions) {
     this.maxVersions = versions;
@@ -711,7 +711,7 @@ public class Scan extends Query {
 
   /**
    * Setting the familyMap
-   * @param familyMap map of family to qualifier n
+   * @param familyMap map of family to qualifier
    */
   public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
     this.familyMap = familyMap;
@@ -719,7 +719,7 @@ public class Scan extends Query {
   }
 
   /**
-   * Getting the familyMap n
+   * Getting the familyMap
    */
   public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
     return this.familyMap;
@@ -794,16 +794,12 @@ public class Scan extends Query {
     return this.caching;
   }
 
-  /**
-   * n
-   */
+  /** Returns TimeRange */
   public TimeRange getTimeRange() {
     return this.tr;
   }
 
-  /**
-   * n
-   */
+  /** Returns RowFilter */
   @Override
   public Filter getFilter() {
     return filter;
@@ -838,7 +834,7 @@ public class Scan extends Query {
    * Set whether this scan is a reversed one
    * <p>
    * This is false by default which means forward(normal) scan.
-   * @param reversed if true, scan will be backward order n
+   * @param reversed if true, scan will be in backward order
    */
   public Scan setReversed(boolean reversed) {
     this.reversed = reversed;
@@ -857,7 +853,8 @@ public class Scan extends Query {
    * Setting whether the caller wants to see the partial results when server returns
    * less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By
    * default this value is false and the complete results will be assembled client side before being
-   * delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow()
+   * delivered to the caller.
+   * @see Result#mayHaveMoreCellsInRow()
    * @see #setBatch(int)
    */
   public Scan setAllowPartialResults(final boolean allowPartialResults) {
@@ -881,7 +878,7 @@ public class Scan extends Query {
 
   /**
    * Compile the table and column family (i.e. schema) information into a String. Useful for parsing
-   * and aggregation by debugging, logging, and administration tools. n
+   * and aggregation by debugging, logging, and administration tools.
    */
   @Override
   public Map<String, Object> getFingerprint() {
@@ -903,7 +900,7 @@ public class Scan extends Query {
    * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
    * Map along with the fingerprinted information. Useful for debugging, logging, and administration
    * tools.
-   * @param maxCols a limit on the number of columns output prior to truncation n
+   * @param maxCols a limit on the number of columns output prior to truncation
    */
   @Override
   public Map<String, Object> toMap(int maxCols) {
@@ -1109,7 +1106,7 @@ public class Scan extends Query {
    * reaches this value.
    * <p>
    * This condition will be tested at last, after all other conditions such as stopRow, filter, etc.
-   * @param limit the limit of rows for this scan n
+   * @param limit the limit of rows for this scan
    */
   public Scan setLimit(int limit) {
     this.limit = limit;
@@ -1118,7 +1115,7 @@ public class Scan extends Query {
 
   /**
    * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
-   * set {@code readType} to {@link ReadType#PREAD}. n
+   * set {@code readType} to {@link ReadType#PREAD}.
    */
   public Scan setOneRowLimit() {
     return setLimit(1).setReadType(ReadType.PREAD);
@@ -1140,7 +1137,7 @@ public class Scan extends Query {
    * Set the read type for this scan.
    * <p>
    * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
-   * example, we will always use pread if this is a get scan. n
+   * example, we will always use pread if this is a get scan.
    */
   public Scan setReadType(ReadType readType) {
     this.readType = readType;
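
For reference, a minimal sketch combining the Scan APIs documented above (not part of this
patch):

    // A bounded scan: inclusive start row, exclusive stop row, capped rows and versions.
    Scan scan = new Scan()
      .withStartRow(Bytes.toBytes("row-000"), true)
      .withStopRow(Bytes.toBytes("row-999"), false)
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"))
      .readVersions(3)
      .setLimit(100);
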
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
index aeca91e5bc9..825a58e7bdd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
@@ -99,8 +99,8 @@ public class SecureBulkLoadClient {
   }
 
   /**
-   * Securely bulk load a list of HFiles using client protocol. nnnnnn * @return true if all are
-   * loaded n
+   * Securely bulk load a list of HFiles using client protocol.
+   * @return true if all are loaded
    */
   public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client,
     final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -110,8 +110,8 @@ public class SecureBulkLoadClient {
   }
 
   /**
-   * Securely bulk load a list of HFiles using client protocol. nnnnnnn * @return true if all are
-   * loaded n
+   * Securely bulk load a list of HFiles using client protocol.
+   * @return true if all are loaded
    */
   public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client,
     final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 79d3572cd61..00c531c4763 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -147,7 +147,8 @@ public interface Table extends Closeable {
    * @param results Empty Object[], same size as actions. Provides access to partial results, in
    *                case an exception is thrown. A null in the result array means that the call for
    *                that action failed, even after retries. The order of the objects in the results
-   *                array corresponds to the order of actions in the request list. n * @since 0.90.0
+   *                array corresponds to the order of actions in the request list.
+   * @since 0.90.0
    */
   default void batch(final List<? extends Row> actions, final Object[] results)
     throws IOException, InterruptedException {
@@ -358,8 +359,8 @@ public interface Table extends Closeable {
    * @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
    *          {@link #put(List)} runs pre-flight validations on the input list on client. Currently
    *          {@link #delete(List)} doesn't run validations on the client, there is no need
-   *          currently, but this may change in the future. An * {@link IllegalArgumentException}
-   *          will be thrown in this case.
+   *          currently, but this may change in the future. An {@link IllegalArgumentException} will
+   *          be thrown in this case.
    */
   default void delete(List<Delete> deletes) throws IOException {
     throw new NotImplementedException("Add an implementation!");
@@ -780,12 +781,12 @@ public interface Table extends Closeable {
    * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
    * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
    * the invocations to the same region server will be batched into one call. The coprocessor
-   * service is invoked according to the service instance, method name and parameters. n * the
-   * descriptor for the protobuf service method to call. n * the method call parameters n * start
-   * region selection with region containing this row. If {@code null}, the selection will start
-   * with the first table region. n * select regions up to and including the region containing this
-   * row. If {@code null}, selection will continue through the last table region. n * the proto type
-   * of the response of the method in Service.
+   * service is invoked according to the service instance, method name and parameters: the
+   * descriptor for the protobuf service method to call, the method call parameters, the row that
+   * starts region selection (if {@code null}, the selection will start with the first table
+   * region), the row that selects regions up to and including the region containing it (if
+   * {@code null}, selection will continue through the last table region), and the proto type of
+   * the response of the method in Service.
    * @param <R> the response type for the coprocessor Service method
    * @return a map of result values keyed by region name
    */
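
For reference, a minimal sketch of Table.batch as documented above (assumes a live Table named
table; not part of this patch):

    // A null slot in results marks an action that failed even after retries.
    List<Row> actions = new ArrayList<>();
    actions.add(new Get(Bytes.toBytes("r1")));
    actions.add(new Put(Bytes.toBytes("r2"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    Object[] results = new Object[actions.size()];
    table.batch(actions, results);
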
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index e2dc4773d8d..20511e446f0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -940,7 +940,7 @@ public class TableDescriptorBuilder {
     }
 
     /**
-     * Get the name of the table n
+     * Get the name of the table
      */
     @Override
     public TableName getTableName() {
@@ -1353,7 +1353,8 @@ public class TableDescriptorBuilder {
      * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
      * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
      * region is opened.
-     * @param className Full class name. n * @return the modifyable TD
+     * @param className Full class name.
+     * @return the modifyable TD
      */
     public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
       return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
@@ -1401,8 +1402,8 @@ public class TableDescriptorBuilder {
      * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
      * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
      * region is opened.
-     * @param specStr The Coprocessor specification all in in one String n * @return the modifyable
-     *                TD
+     * @param specStr The Coprocessor specification all in one String
+     * @return the modifyable TD
      * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed
      *             in HBase 3.0.0.
      */
@@ -1550,8 +1551,8 @@ public class TableDescriptorBuilder {
     /**
      * Parse the serialized representation of a {@link ModifyableTableDescriptor}
      * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
-     * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code> n
-     *         * @see #toByteArray()
+     * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
+     * @see #toByteArray()
      */
     private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
       if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
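
For reference, a minimal sketch of setCoprocessor in a builder chain (the coprocessor class name
is a placeholder; not part of this patch):

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .setCoprocessor("org.example.MyRegionObserver") // loadability is checked at region open
      .build();
    admin.createTable(td);
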
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
index 4e20302be45..bf54f6e5904 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -42,7 +42,7 @@ public class TableState {
 
     /**
      * Convert from PB version of State
-     * @param state convert from n
+     * @param state the protobuf state to convert from
      */
     public static State convert(HBaseProtos.TableState.State state) {
       State ret;
@@ -66,7 +66,7 @@ public class TableState {
     }
 
     /**
-     * Covert to PB version of State n
+     * Convert to PB version of State
      */
     public HBaseProtos.TableState.State convert() {
       HBaseProtos.TableState.State state;
@@ -140,7 +140,7 @@ public class TableState {
   }
 
   /**
-   * Table name for state n
+   * Table name for state
    */
   public TableName getTableName() {
     return tableName;
@@ -168,7 +168,7 @@ public class TableState {
   }
 
   /**
-   * Covert to PB version of TableState n
+   * Convert to PB version of TableState
    */
   public HBaseProtos.TableState convert() {
     return HBaseProtos.TableState.newBuilder().setState(this.state.convert()).build();
@@ -177,7 +177,7 @@ public class TableState {
   /**
    * Convert from PB version of TableState
    * @param tableName  table this state is for
-   * @param tableState convert from n
+   * @param tableState the table state to convert from
    */
   public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) {
     TableState.State state = State.convert(tableState.getState());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
index a2a53114ac7..cab7eff1516 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @Deprecated
 class UnmodifyableHRegionInfo extends HRegionInfo {
   /*
-   * Creates an unmodifyable copy of an HRegionInfo n
+   * Creates an unmodifyable copy of an HRegionInfo
    */
   UnmodifyableHRegionInfo(HRegionInfo info) {
     super(info);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index ab5915ec975..76a0d6addf3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -33,7 +33,7 @@ public class ServerStatistics {
 
   /**
    * Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, as
-   * something gets set nn
+   * something gets set.
    */
   public void update(byte[] region, RegionLoadStats currentStats) {
     RegionStatistics regionStat = this.stats.get(region);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
index 519109934eb..8d75af05cfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
@@ -35,8 +35,8 @@ public class ServerSideScanMetrics {
   private final Map<String, AtomicLong> counters = new HashMap<>();
 
   /**
-   * Create a new counter with the specified name n * @return {@link AtomicLong} instance for the
-   * counter with counterName
+   * Create a new counter with the specified name
+   * @return {@link AtomicLong} instance for the counter with counterName
    */
   protected AtomicLong createCounter(String counterName) {
     AtomicLong c = new AtomicLong(0);
@@ -75,9 +75,6 @@ public class ServerSideScanMetrics {
    */
   public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME);
 
-  /**
-   * nn
-   */
   public void setCounter(String counterName, long value) {
     AtomicLong c = this.counters.get(counterName);
     if (c != null) {
@@ -85,23 +82,16 @@ public class ServerSideScanMetrics {
     }
   }
 
-  /**
-   * n * @return true if a counter exists with the counterName
-   */
+  /** Returns true if a counter exists with the counterName */
   public boolean hasCounter(String counterName) {
     return this.counters.containsKey(counterName);
   }
 
-  /**
-   * n * @return {@link AtomicLong} instance for this counter name, null if counter does not exist.
-   */
+  /** Returns {@link AtomicLong} instance for this counter name, null if counter does not exist. */
   public AtomicLong getCounter(String counterName) {
     return this.counters.get(counterName);
   }
 
-  /**
-   * nn
-   */
   public void addToCounter(String counterName, long delta) {
     AtomicLong c = this.counters.get(counterName);
     if (c != null) {
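
For reference, a minimal sketch of reading these counters through the client scan metrics
(assumes a live Table named table; not part of this patch):

    Scan scan = new Scan().setScanMetricsEnabled(true);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // consume results
      }
      ScanMetrics metrics = scanner.getScanMetrics();
      long rowsScanned = metrics.countOfRowsScanned.get();
    }
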
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 825eec539b1..3133b54657f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -175,7 +175,8 @@ public class ReplicationAdmin implements Closeable {
 
   /**
    * Get the number of slave clusters the local cluster has.
-   * @return number of slave clusters n * @deprecated
+   * @return number of slave clusters
+   * @deprecated
    */
   @Deprecated
   public int getPeersCount() throws IOException {
@@ -219,8 +220,9 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Append the replicable table-cf config of the specified peer
    * @param id       a short that identifies the cluster
-   * @param tableCfs table-cfs config str nn * @deprecated as release of 2.0.0, and it will be
-   *                 removed in 3.0.0, use {@link #appendPeerTableCFs(String, Map)} instead.
+   * @param tableCfs table-cfs config str
+   * @deprecated as of release 2.0.0, and it will be removed in 3.0.0, use
+   *             {@link #appendPeerTableCFs(String, Map)} instead.
    */
   @Deprecated
   public void appendPeerTableCFs(String id, String tableCfs)
@@ -231,7 +233,7 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Append the replicable table-cf config of the specified peer
    * @param id       a short that identifies the cluster
-   * @param tableCfs A map from tableName to column family names nn
+   * @param tableCfs A map from tableName to column family names
    */
   @Deprecated
   public void appendPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
@@ -242,8 +244,9 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Remove some table-cfs from table-cfs config of the specified peer
    * @param id      a short name that identifies the cluster
-   * @param tableCf table-cfs config str nn * @deprecated as release of 2.0.0, and it will be
-   *                removed in 3.0.0, use {@link #removePeerTableCFs(String, Map)} instead.
+   * @param tableCf table-cfs config str
+   * @deprecated as of release 2.0.0, and it will be removed in 3.0.0, use
+   *             {@link #removePeerTableCFs(String, Map)} instead.
    */
   @Deprecated
   public void removePeerTableCFs(String id, String tableCf)
@@ -254,7 +257,7 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Remove some table-cfs from config of the specified peer
    * @param id       a short name that identifies the cluster
-   * @param tableCfs A map from tableName to column family names nn
+   * @param tableCfs A map from tableName to column family names
    */
   @Deprecated
   public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
index 6df88561fdb..02f3e82e6ff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
@@ -51,36 +51,27 @@ import org.apache.yetus.audience.InterfaceStability;
 public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Message,
   R extends Message> {
 
-  /**
-   * nnn * @return value of type T n
-   */
+  /** Returns value of type T */
   public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException;
 
-  /**
-   * nn * @return sum or non null value among (if either of them is null); otherwise returns a null.
-   */
+  /** Returns the sum, or the non-null value if either is null; otherwise returns null. */
   public abstract S add(S l1, S l2);
 
   /**
-   * returns the maximum value for this type T n
+   * Returns the maximum value for this type T.
    */
-
   public abstract T getMaxValue();
 
   public abstract T getMinValue();
 
-  /**
-   * nnn
-   */
+  /** Returns the product of o1 and o2 */
   public abstract S multiply(S o1, S o2);
 
-  /**
-   * nn
-   */
+  /** Returns the incremented value of o */
   public abstract S increment(S o);
 
   /**
-   * provides casting opportunity between the data types. nn
+   * Provides a casting opportunity between the data types.
    */
   public abstract S castToReturnType(T o);
 
@@ -95,7 +86,7 @@ public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Messa
 
   /**
    * used for computing average of &lt;S&gt; data values. Not providing the divide method that takes
-   * two &lt;S&gt; values as it is not needed as of now. nnn
+   * two &lt;S&gt; values as it is not needed as of now.
    */
   public abstract double divideForAvg(S o, Long l);
 
@@ -111,37 +102,37 @@ public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Messa
 
   /**
    * This method should initialize any field(s) of the ColumnInterpreter with a parsing of the
-   * passed message bytes (used on the server side). n
+   * passed message bytes (used on the server side).
    */
   public abstract void initialize(P msg);
 
   /**
-   * This method gets the PB message corresponding to the cell type n * @return the PB message for
-   * the cell-type instance
+   * This method gets the PB message corresponding to the cell type
+   * @return the PB message for the cell-type instance
    */
   public abstract Q getProtoForCellType(T t);
 
   /**
-   * This method gets the PB message corresponding to the cell type n * @return the cell-type
-   * instance from the PB message
+   * This method gets the PB message corresponding to the cell type
+   * @return the cell-type instance from the PB message
    */
   public abstract T getCellValueFromProto(Q q);
 
   /**
-   * This method gets the PB message corresponding to the promoted type n * @return the PB message
-   * for the promoted-type instance
+   * This method gets the PB message corresponding to the promoted type
+   * @return the PB message for the promoted-type instance
    */
   public abstract R getProtoForPromotedType(S s);
 
   /**
-   * This method gets the promoted type from the proto message n * @return the promoted-type
-   * instance from the PB message
+   * This method gets the promoted type from the proto message
+   * @return the promoted-type instance from the PB message
    */
   public abstract S getPromotedValueFromProto(R r);
 
   /**
    * The response message comes as type S. This will convert/cast it to T. In some sense, performs
-   * the opposite of {@link #castToReturnType(Object)} nn
+   * the opposite of {@link #castToReturnType(Object)}.
    */
   public abstract T castToCellType(S response);
 }
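
For reference, a minimal sketch of a concrete interpreter in use; LongColumnInterpreter and
AggregationClient live in the hbase-endpoint module, and the table must have the
AggregateImplementation endpoint loaded (not part of this patch):

    // sum(...) declares throws Throwable in this API.
    try (AggregationClient aggClient = new AggregationClient(conf)) {
      Scan scan = new Scan().addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      Long sum = aggClient.sum(TableName.valueOf("t1"), new LongColumnInterpreter(), scan);
    }
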
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
index ff9ed066fd4..de8e90ca9ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java
@@ -33,7 +33,7 @@ public class CoprocessorException extends DoNotRetryIOException {
   }
 
   /**
-   * Constructor with a Class object and exception message. nn
+   * Constructor with a Class object and exception message.
    */
   public CoprocessorException(Class<?> clazz, String s) {
     super("Coprocessor [" + clazz.getName() + "]: " + s);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
index fd9936dc502..5f2b98c8370 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
@@ -150,7 +150,7 @@ public final class ClientExceptionsUtil {
   /**
    * Translates exception for preemptive fast fail checks.
    * @param t exception to check
-   * @return translated exception n
+   * @return translated exception
    */
   public static Throwable translatePFFE(Throwable t) throws IOException {
     if (t instanceof NoSuchMethodError) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
index ae15777a7f0..00774e37094 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
@@ -35,15 +35,13 @@ public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRet
   }
 
   /**
-   * n
-   */
+   */
   public FailedSanityCheckException(String message) {
     super(message);
   }
 
   /**
-   * nn
-   */
+   */
   public FailedSanityCheckException(String message, Throwable cause) {
     super(message, cause);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
index e7c06d44aef..1991100d0da 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
@@ -68,9 +68,7 @@ public class ColumnValueFilter extends FilterBase {
     this.comparator = Preconditions.checkNotNull(comparator, "Comparator should not be null");
   }
 
-  /**
-   * n
-   */
+  /** Returns the compare operator */
   public CompareOperator getCompareOperator() {
     return op;
   }
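
For reference, a minimal sketch of this filter on a scan (not part of this patch):

    // Emit only cells of cf:q whose value equals "expected".
    Filter f = new ColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q"),
      CompareOperator.EQUAL, Bytes.toBytes("expected"));
    Scan scan = new Scan().setFilter(f);
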
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
index 0a54e991e30..9ed9c526bb9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
@@ -101,8 +101,7 @@ public abstract class CompareFilter extends FilterBase {
   }
 
   /**
-   * n * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()}
-   * instead.
+   * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead.
    */
   @Deprecated
   public CompareOp getOperator() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
index cac538dcb8a..29bfce4b07d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
@@ -252,8 +252,9 @@ public abstract class Filter {
 
   /**
    * Concrete implementers can signal a failure condition in their code by throwing an
-   * {@link IOException}. n * @return true if and only if the fields of the filter that are
-   * serialized are equal to the corresponding fields in other. Used for testing.
+   * {@link IOException}.
+   * @return true if and only if the fields of the filter that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   abstract boolean areSerializedFieldsEqual(Filter other);
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
index e66022f6e7d..988725edad5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
@@ -148,9 +148,9 @@ public abstract class FilterBase extends Filter {
   }
 
   /**
-   * Default implementation so that writers of custom filters aren't forced to implement. n
-   * * @return true if and only if the fields of the filter that are serialized are equal to the
-   * corresponding fields in other. Used for testing.
+   * Default implementation so that writers of custom filters aren't forced to implement.
+   * @return true if and only if the fields of the filter that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
   @Override
   boolean areSerializedFieldsEqual(Filter other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
index 1e00f08757f..4c096bed165 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
@@ -84,7 +84,7 @@ final public class FilterList extends FilterBase {
 
   /**
    * Constructor that takes a var arg number of {@link Filter}s. The default operator MUST_PASS_ALL
-   * is assumed. n
+   * is assumed.
    */
   public FilterList(final Filter... filters) {
     this(Operator.MUST_PASS_ALL, Arrays.asList(filters));
@@ -108,14 +108,14 @@ final public class FilterList extends FilterBase {
   }
 
   /**
-   * Get the operator. n
+   * Get the operator.
    */
   public Operator getOperator() {
     return operator;
   }
 
   /**
-   * Get the filters. n
+   * Get the filters.
    */
   public List<Filter> getFilters() {
     return filterListBase.getFilters();
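
For reference, a minimal sketch of composing filters with the operator documented above (not
part of this patch):

    // Both filters must pass for a cell to be emitted.
    Filter prefix = new PrefixFilter(Bytes.toBytes("user-"));
    Filter keysOnly = new KeysOnlyFilter();
    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL, prefix, keysOnly);
    Scan scan = new Scan().setFilter(list);
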
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
index 2d36172064d..1bff5681746 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
@@ -92,7 +92,7 @@ public abstract class FilterListBase extends FilterBase {
    * the current child, we should set the traverse result (transformed cell) of previous node(s) as
    * the initial value. (HBASE-18879).
    * @param c The cell in question.
-   * @return the transformed cell. n
+   * @return the transformed cell.
    */
   @Override
   public Cell transformCell(Cell c) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 36c1fe2cffa..cc09bdb94d7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -138,7 +138,8 @@ public class FuzzyRowFilter extends FilterBase {
 
   /**
    * We need to preprocess the mask array, since we treat 2's as unfixed positions and -1 (0xff) as
-   * fixed positions n * @return mask array
+   * fixed positions
+   * @return mask array
    */
   private byte[] preprocessMask(byte[] mask) {
     if (!UNSAFE_UNALIGNED) {
@@ -630,8 +631,8 @@ public class FuzzyRowFilter extends FilterBase {
 
   /**
    * For forward scanner, next cell hint should not contain any trailing zeroes unless they are part
-   * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01' nn * @param
-   * toInc - position of incremented byte
+   * of fuzzyKeyMeta; hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01'
+   * @param toInc - position of incremented byte
    * @return trimmed version of result
    */
 
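For reference, a minimal sketch of a fuzzy key (not part of this patch):

    // Match row keys like "????_login": in the fuzzy-info mask, 0 marks a fixed
    // byte and 1 marks a byte that may take any value.
    byte[] template = Bytes.toBytes("0000_login");
    byte[] fuzzyInfo = new byte[] { 1, 1, 1, 1, 0, 0, 0, 0, 0, 0 };
    Filter fuzzy = new FuzzyRowFilter(Arrays.asList(new Pair<>(template, fuzzyInfo)));
    Scan scan = new Scan().setFilter(fuzzy);
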
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
index f7e0281b112..47911f67675 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
@@ -38,7 +38,7 @@ public class RandomRowFilter extends FilterBase {
   protected boolean filterOutRow;
 
   /**
-   * Create a new filter with a specified chance for a row to be included. n
+   * Create a new filter with a specified chance for a row to be included.
    */
   public RandomRowFilter(float chance) {
     this.chance = chance;
@@ -50,7 +50,7 @@ public class RandomRowFilter extends FilterBase {
   }
 
   /**
-   * Set the chance that a row is included. n
+   * Set the chance that a row is included.
    */
   public void setChance(float chance) {
     this.chance = chance;
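
For reference, a minimal sketch (not part of this patch):

    // Include roughly 5% of rows, sampled server-side.
    Scan scan = new Scan().setFilter(new RandomRowFilter(0.05f));
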
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
index d3f65efac4a..878c803650f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
@@ -109,9 +109,9 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
   }
 
   /**
-   * Constructor for protobuf deserialization only. nnnnnn * @deprecated Since 2.0.0. Will be
-   * removed in 3.0.0. Use
-   * {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
+   * Constructor for protobuf deserialization only.
+   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+   *             {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
    */
   @Deprecated
   protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
@@ -122,7 +122,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
   }
 
   /**
-   * Constructor for protobuf deserialization only. nnnnnn
+   * Constructor for protobuf deserialization only.
    */
   protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
     final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index 7048563d0d6..63233e772a1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -155,10 +155,10 @@ public class SingleColumnValueFilter extends FilterBase {
   }
 
   /**
-   * Constructor for protobuf deserialization only. nnnnnn * @deprecated Since 2.0.0. Will be
-   * removed in 3.0.0. Use
-   * {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
-   * instead.
+   * Constructor for protobuf deserialization only.
+   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+   *             {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
+   *             instead.
    */
   @Deprecated
   protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
@@ -169,7 +169,7 @@ public class SingleColumnValueFilter extends FilterBase {
   }
 
   /**
-   * Constructor for protobuf deserialization only. nnnnnn
+   * Constructor for protobuf deserialization only.
    */
   protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
     final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator,
@@ -180,8 +180,7 @@ public class SingleColumnValueFilter extends FilterBase {
   }
 
   /**
-   * n * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()}
-   * instead.
+   * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead.
    */
   @Deprecated
   public CompareOp getOperator() {
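
For reference, a minimal sketch of the non-deprecated construction (not part of this patch):

    // Keep rows whose cf:status column equals "ACTIVE"; also drop rows missing the column.
    SingleColumnValueFilter f = new SingleColumnValueFilter(Bytes.toBytes("cf"),
      Bytes.toBytes("status"), CompareOperator.EQUAL, Bytes.toBytes("ACTIVE"));
    f.setFilterIfMissing(true);
    Scan scan = new Scan().setFilter(f);
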
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
index 5b3153649e0..e8dd30ef5dd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
@@ -54,7 +54,7 @@ public class TimestampsFilter extends FilterBase {
   long minTimestamp = Long.MAX_VALUE;
 
   /**
-   * Constructor for filter that retains only the specified timestamps in the list. n
+   * Constructor for filter that retains only the specified timestamps in the list.
    */
   public TimestampsFilter(List<Long> timestamps) {
     this(timestamps, false);
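
For reference, a minimal sketch (the timestamps are illustrative; not part of this patch):

    // Return only cells written at exactly these timestamps; read all versions so
    // that older matching cells are not masked by newer ones.
    Filter tsFilter = new TimestampsFilter(Arrays.asList(1000L, 2000L));
    Scan scan = new Scan().setFilter(tsFilter).readAllVersions();
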
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
index b2b3698aa2c..e7364ca3b42 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
@@ -104,9 +104,10 @@ class CellBlockBuilder {
 
   /**
    * Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
-   * <code>compressor</code>. nnn * @return Null or byte buffer filled with a cellblock filled with
-   * passed-in Cells encoded using passed in <code>codec</code> and/or <code>compressor</code>; the
-   * returned buffer has been flipped and is ready for reading. Use limit to find total size. n
+   * <code>compressor</code>.
+   * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
+   *         passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has
+   *         been flipped and is ready for reading. Use limit to find total size.
    */
   public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
     final CellScanner cellScanner) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 0a4d91c3d51..ad3dd00891b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -259,7 +259,7 @@ public final class ProtobufUtil {
    * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because it tries to contain direct protobuf
-   * references. n
+   * references.
    */
   public static IOException handleRemoteException(Exception e) {
     return makeIOExceptionOfException(e);
@@ -363,7 +363,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Get to a client Get
    * @param proto the protocol buffer Get to convert
-   * @return the converted client Get n
+   * @return the converted client Get
    */
   public static Get toGet(final ClientProtos.Get proto) throws IOException {
     if (proto == null) return null;
@@ -448,7 +448,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to a Put.
    * @param proto The protocol buffer MutationProto to convert
-   * @return A client Put. n
+   * @return A client Put.
    */
   public static Put toPut(final MutationProto proto) throws IOException {
     return toPut(proto, null);
@@ -458,7 +458,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Mutate to a Put.
    * @param proto       The protocol buffer MutationProto to convert
    * @param cellScanner If non-null, the Cell data that goes with this proto.
-   * @return A client Put. n
+   * @return A client Put.
    */
   public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -542,7 +542,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to a Delete
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted client Delete n
+   * @return the converted client Delete
    */
   public static Delete toDelete(final MutationProto proto) throws IOException {
     return toDelete(proto, null);
@@ -552,7 +552,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Mutate to a Delete
    * @param proto       the protocol buffer Mutate to convert
    * @param cellScanner if non-null, the data that goes with this delete.
-   * @return the converted client Delete n
+   * @return the converted client Delete
    */
   public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -716,7 +716,7 @@ public final class ProtobufUtil {
   /**
    * Convert a MutateRequest to Mutation
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted Mutation n
+   * @return the converted Mutation
    */
   public static Mutation toMutation(final MutationProto proto) throws IOException {
     MutationType type = proto.getMutateType();
@@ -737,7 +737,8 @@ public final class ProtobufUtil {
 
   /**
    * Convert a protocol buffer Mutate to a Get.
-   * @param proto the protocol buffer Mutate to convert. n * @return the converted client get. n
+   * @param proto the protocol buffer Mutate to convert.
+   * @return the converted client Get.
    */
   public static Get toGet(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -817,7 +818,7 @@ public final class ProtobufUtil {
   /**
    * Convert a client Scan to a protocol buffer Scan
    * @param scan the client Scan to convert
-   * @return the converted protocol buffer Scan n
+   * @return the converted protocol buffer Scan
    */
   public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
     ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
@@ -910,7 +911,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Scan to a client Scan
    * @param proto the protocol buffer Scan to convert
-   * @return the converted client Scan n
+   * @return the converted client Scan
    */
   public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
     byte[] startRow = HConstants.EMPTY_START_ROW;
@@ -1011,7 +1012,7 @@ public final class ProtobufUtil {
   /**
    * Create a protocol buffer Get based on a client Get.
    * @param get the client Get
-   * @return a protocol buffer Get n
+   * @return a protocol buffer Get
    */
   public static ClientProtos.Get toGet(final Get get) throws IOException {
     ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
@@ -1076,7 +1077,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
+   * Create a protocol buffer Mutate based on a client Mutation
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutation(final MutationType type, final Mutation mutation,
     final long nonce) throws IOException {
@@ -1125,8 +1127,8 @@ public final class ProtobufUtil {
 
   /**
    * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
-   * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a
-   * protobuf'd Mutation n
+   * Understanding is that the Cell will be transported other than via protobuf.
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation,
     final MutationProto.Builder builder) throws IOException {
@@ -1135,8 +1137,8 @@ public final class ProtobufUtil {
 
   /**
    * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
-   * Understanding is that the Cell will be transported other than via protobuf. nn * @return a
-   * protobuf'd Mutation n
+   * Understanding is that the Cell will be transported other than via protobuf.
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation)
     throws IOException {
@@ -1162,8 +1164,8 @@ public final class ProtobufUtil {
 
   /**
    * Code shared by {@link #toMutation(MutationType, Mutation)} and
-   * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd
-   * Mutation.
+   * {@link #toMutationNoData(MutationType, Mutation)}
+   * @return A partly-filled out protobuf'd Mutation.
    */
   private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
     final Mutation mutation, MutationProto.Builder builder) {
@@ -1268,7 +1270,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Result to a client Result
    * @param proto   the protocol buffer Result to convert
    * @param scanner Optional cell scanner.
-   * @return the converted client Result n
+   * @return the converted client Result
    */
   public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner)
     throws IOException {
@@ -1382,8 +1384,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer
-   * DeleteType n
+   * Convert a delete KeyValue type to protocol buffer DeleteType.
+   * @return protocol buffer DeleteType
    */
   public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
     switch (type) {
@@ -1403,7 +1405,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer DeleteType to delete KeyValue type.
    * @param type The DeleteType
-   * @return The type. n
+   * @return The type.
    */
   public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException {
     switch (type) {
@@ -1567,7 +1569,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with byte arrays
    * @param builder current message builder
-   * @param b       byte array n
+   * @param b       byte array
    */
   public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
     final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -1580,7 +1582,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with byte arrays
    * @param builder current message builder
-   * @param b       byte array nnn
+   * @param b       byte array
    */
   public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length)
     throws IOException {
@@ -1634,7 +1636,7 @@ public final class ProtobufUtil {
    *             magic and that is then followed by a protobuf that has a serialized
    *             {@link ServerName} in it.
    * @return Returns null if <code>data</code> is null else converts passed data to a ServerName
-   *         instance. n
+   *         instance.
    */
   public static ServerName toServerName(final byte[] data) throws DeserializationException {
     if (data == null || data.length <= 0) return null;
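
The mergeFrom overloads touched a few hunks up exist to dodge protobuf's default 64MB decode limit. The pattern they rely on, as a minimal sketch against the non-shaded com.google.protobuf CodedInputStream API (illustrative of the technique, not the exact HBase source; the shaded ProtobufUtil later in this patch uses the relocated classes):

    import java.io.IOException;

    import com.google.protobuf.CodedInputStream;
    import com.google.protobuf.Message;

    public final class MergeFromSketch {
      /**
       * Decode a message from a byte array without tripping the default 64MB
       * size limit: raise the limit to the array length, merge, then verify
       * the stream ended cleanly (tag 0 means end of message).
       */
      public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
        CodedInputStream codedInput = CodedInputStream.newInstance(b);
        codedInput.setSizeLimit(b.length);
        builder.mergeFrom(codedInput);
        codedInput.checkLastTagWas(0);
      }
    }
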
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
index d63f28cdab8..155c721b98a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
@@ -34,8 +34,7 @@ public class LeaseException extends DoNotRetryIOException {
   }
 
   /**
-   * n
-   */
+   */
   public LeaseException(String message) {
     super(message);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
index 2e2a3a895ce..c0330034810 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
@@ -33,8 +33,7 @@ public class FailedLogCloseException extends IOException {
   }
 
   /**
-   * n
-   */
+   */
   public FailedLogCloseException(String msg) {
     super(msg);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
index feab0b07f2f..a2a43203b64 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
@@ -32,8 +32,7 @@ public class FailedSyncBeforeLogCloseException extends FailedLogCloseException {
   }
 
   /**
-   * n
-   */
+   */
   public FailedSyncBeforeLogCloseException(String msg) {
     super(msg);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
index 7be1ac630e1..b2bf6a4f536 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
@@ -50,7 +50,7 @@ public abstract class AbstractHBaseSaslRpcClient {
    * @param token           token to use if needed by the authentication method
    * @param serverAddr      the address of the hbase service
    * @param securityInfo    the security details for the remote hbase service
-   * @param fallbackAllowed does the client allow fallback to simple authentication n
+   * @param fallbackAllowed does the client allow fallback to simple authentication
    */
   protected AbstractHBaseSaslRpcClient(Configuration conf,
     SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
@@ -66,7 +66,7 @@ public abstract class AbstractHBaseSaslRpcClient {
    * @param serverAddr      the address of the hbase service
    * @param securityInfo    the security details for the remote hbase service
    * @param fallbackAllowed does the client allow fallback to simple authentication
-   * @param rpcProtection   the protection level ("authentication", "integrity" or "privacy") n
+   * @param rpcProtection   the protection level ("authentication", "integrity" or "privacy")
    */
   protected AbstractHBaseSaslRpcClient(Configuration conf,
     SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
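
The rpcProtection argument corresponds to the SASL quality-of-protection levels. In client code it is normally driven by configuration rather than passed by hand; a minimal sketch using the standard hbase.rpc.protection key (the value must match what the servers are configured with):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcProtectionExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "authentication" maps to SASL auth, "integrity" to auth-int,
        // and "privacy" to auth-conf (encrypted RPC payloads).
        conf.set("hbase.rpc.protection", "privacy");
        // A connection built from this conf negotiates that QOP during SASL setup.
      }
    }
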
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 5a816877ba8..6c755f9a94c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -62,7 +62,7 @@ public final class EncryptionUtil {
    * @param conf      configuration
    * @param key       the raw key bytes
    * @param algorithm the algorithm to use with this key material
-   * @return the encrypted key bytes n
+   * @return the encrypted key bytes
    */
   public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm)
     throws IOException {
@@ -115,7 +115,7 @@ public final class EncryptionUtil {
    * @param conf    configuration
    * @param subject subject key alias
    * @param value   the encrypted key bytes
-   * @return the raw key bytes nn
+   * @return the raw key bytes
    */
   public static Key unwrapKey(Configuration conf, String subject, byte[] value)
     throws IOException, KeyException {
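
wrapKey and unwrapKey are inverses, keyed off the cluster master key alias. A minimal round-trip sketch, assuming a key provider is configured (hbase.crypto.keyprovider) and that "hbase" is the master key alias the wrap used — both are assumptions, not fixed values:

    import java.security.Key;
    import java.security.SecureRandom;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.EncryptionUtil;

    public class KeyWrapExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[] raw = new byte[16]; // a 128-bit AES data key
        new SecureRandom().nextBytes(raw);
        // Encrypt the data key with the configured cluster master key.
        byte[] wrapped = EncryptionUtil.wrapKey(conf, raw, "AES");
        // The subject must match the alias the key was wrapped under (assumed here).
        Key key = EncryptionUtil.unwrapKey(conf, "hbase", wrapped);
        System.out.println(key.getAlgorithm()); // AES
      }
    }
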
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
index 93ad9245f65..0394bb0f2a3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
@@ -86,7 +86,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
    * Do client side SASL authentication with server via the given InputStream and OutputStream
    * @param inS  InputStream to use
    * @param outS OutputStream to use
-   * @return true if connection is set up, or false if needs to switch to simple Auth. n
+   * @return true if connection is set up, or false if needs to switch to simple Auth.
    */
   public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException {
     DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
@@ -185,7 +185,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
 
   /**
    * Get a SASL wrapped InputStream. Can be called only after saslConnect() has been called.
-   * @return a SASL wrapped InputStream n
+   * @return a SASL wrapped InputStream
    */
   public InputStream getInputStream() throws IOException {
     if (!saslClient.isComplete()) {
@@ -248,7 +248,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
 
   /**
    * Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called.
-   * @return a SASL wrapped OutputStream n
+   * @return a SASL wrapped OutputStream
    */
   public OutputStream getOutputStream() throws IOException {
     if (!saslClient.isComplete()) {
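
The contract spelled out in these javadocs is: run saslConnect over the raw socket streams first, and only then ask for the wrapped streams. A minimal sketch of that discipline (client construction is elided; the handshake method and socket are assumptions of this example):

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.Socket;

    import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;

    public class SaslHandshakeSketch {
      static void handshake(HBaseSaslRpcClient saslClient, Socket socket) throws Exception {
        InputStream rawIn = socket.getInputStream();
        OutputStream rawOut = socket.getOutputStream();
        if (saslClient.saslConnect(rawIn, rawOut)) {
          // Negotiation succeeded: all further RPC I/O must use the SASL-wrapped
          // streams so integrity/privacy protection is actually applied.
          InputStream in = saslClient.getInputStream();
          OutputStream out = saslClient.getOutputStream();
          // ... exchange RPCs over in/out ...
        } else {
          // Server asked for simple auth and fallback is allowed: keep the raw streams.
        }
      }
    }
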
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
index e30041d46c4..2ea60f8ed57 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
@@ -45,7 +45,7 @@ public class AccessControlClient {
   /**
    * Return true if authorization is supported and enabled
    * @param connection The connection to use
-   * @return true if authorization is supported and enabled, false otherwise n
+   * @return true if authorization is supported and enabled, false otherwise
    */
   public static boolean isAuthorizationEnabled(Connection connection) throws IOException {
     return connection.getAdmin().getSecurityCapabilities()
@@ -55,7 +55,7 @@ public class AccessControlClient {
   /**
    * Return true if cell authorization is supported and enabled
    * @param connection The connection to use
-   * @return true if cell authorization is supported and enabled, false otherwise n
+   * @return true if cell authorization is supported and enabled, false otherwise
    */
   public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException {
     return connection.getAdmin().getSecurityCapabilities()
@@ -146,7 +146,7 @@ public class AccessControlClient {
 
   /**
    * Grant global permissions for the specified user. If permissions for the specified user exist,
-   * later granted permissions will override previous granted permissions. nnnn
+   * later granted permissions will override previously granted permissions.
    */
   public static void grant(Connection connection, final String userName,
     final Permission.Action... actions) throws Throwable {
@@ -162,7 +162,7 @@ public class AccessControlClient {
 
   /**
    * Revokes the permission on the table
-   * @param connection The Connection instance to use nnnnnn
+   * @param connection The Connection instance to use
    */
   public static void revoke(Connection connection, final TableName tableName, final String username,
     final byte[] family, final byte[] qualifier, final Permission.Action... actions)
@@ -173,7 +173,7 @@ public class AccessControlClient {
 
   /**
    * Revokes the permission on the namespace for the specified user.
-   * @param connection The Connection instance to use nnnn
+   * @param connection The Connection instance to use
    */
   public static void revoke(Connection connection, final String namespace, final String userName,
     final Permission.Action... actions) throws Throwable {
@@ -197,7 +197,7 @@ public class AccessControlClient {
    * along with the list of superusers would be returned. Else, no rows get returned.
    * @param connection The Connection instance to use
    * @param tableRegex The regular expression string to match against
-   * @return List of UserPermissions n
+   * @return List of UserPermissions
    */
   public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex)
     throws Throwable {
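
Taken together these helpers make for a compact ACL workflow. A minimal usage sketch (table "t1", family "cf" and user "bob" are placeholders):

    import java.util.List;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AclExample {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          if (!AccessControlClient.isAuthorizationEnabled(conn)) {
            return; // AccessController coprocessor is not installed
          }
          // Global grant; a later grant for the same user overrides this one.
          AccessControlClient.grant(conn, "bob", Permission.Action.READ, Permission.Action.WRITE);
          // Revoke WRITE on a single column family of one table.
          AccessControlClient.revoke(conn, TableName.valueOf("t1"), "bob",
            Bytes.toBytes("cf"), null, Permission.Action.WRITE);
          // Dump permissions for all tables matching a regex.
          List<UserPermission> perms = AccessControlClient.getUserPermissions(conn, "t.*");
          perms.forEach(System.out::println);
        }
      }
    }
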
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
index 1fa1bdfd21c..125a7f5e897 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
@@ -447,8 +447,8 @@ public class AccessControlUtil {
    * It's also called by the shell, in case you want to find references.
    * @param protocol      the AccessControlService protocol proxy
    * @param userShortName the short name of the user to grant permissions
-   * @param actions       the permissions to be granted n * @deprecated Use
-   *                      {@link Admin#grant(UserPermission, boolean)} instead.
+   * @param actions       the permissions to be granted
+   * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
    */
   @Deprecated
   public static void grant(RpcController controller,
@@ -475,8 +475,8 @@ public class AccessControlUtil {
    * @param tableName     optional table name
    * @param f             optional column family
    * @param q             optional qualifier
-   * @param actions       the permissions to be granted n * @deprecated Use
-   *                      {@link Admin#grant(UserPermission, boolean)} instead.
+   * @param actions       the permissions to be granted
+   * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
    */
   @Deprecated
   public static void grant(RpcController controller,
@@ -501,8 +501,8 @@ public class AccessControlUtil {
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
    * @param namespace  the short name of the user to grant permissions
-   * @param actions    the permissions to be granted n * @deprecated Use
-   *                   {@link Admin#grant(UserPermission, boolean)} instead.
+   * @param actions    the permissions to be granted
+   * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
    */
   @Deprecated
   public static void grant(RpcController controller,
@@ -618,9 +618,8 @@ public class AccessControlUtil {
    * A utility used to get user's global permissions based on the specified user name.
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
-   * @param userName   User name, if empty then all user permissions will be retrieved. n
-   *                   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
-   *                   instead.
+   * @param userName   User name, if empty then all user permissions will be retrieved.
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -648,8 +647,8 @@ public class AccessControlUtil {
    * It's also called by the shell, in case you want to find references.
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
-   * @param t          optional table name n * @deprecated Use
-   *                   {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+   * @param t          optional table name
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -665,9 +664,8 @@ public class AccessControlUtil {
    * @param t               optional table name
    * @param columnFamily    Column family
    * @param columnQualifier Column qualifier
-   * @param userName        User name, if empty then all user permissions will be retrieved. n
-   *                        * @deprecated Use
-   *                        {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+   * @param userName        User name, if empty then all user permissions will be retrieved.
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -705,8 +703,8 @@ public class AccessControlUtil {
    * It's also called by the shell, in case you want to find references.
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
-   * @param namespace  name of the namespace n * @deprecated Use
-   *                   {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+   * @param namespace  name of the namespace
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -719,9 +717,8 @@ public class AccessControlUtil {
    * @param controller RpcController
    * @param protocol   the AccessControlService protocol proxy
    * @param namespace  name of the namespace
-   * @param userName   User name, if empty then all user permissions will be retrieved. n
-   *                   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
-   *                   instead.
+   * @param userName   User name, if empty then all user permissions will be retrieved.
+   * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
    */
   @Deprecated
   public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -759,8 +756,8 @@ public class AccessControlUtil {
    *                        will not be considered if columnFamily is passed as null or empty.
    * @param userName        User name, it shouldn't be null or empty.
    * @param actions         Actions
-   * @return true if access allowed, otherwise false n * @deprecated Use
-   *         {@link Admin#hasUserPermissions(String, List)} instead.
+   * @return true if access allowed, otherwise false
+   * @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead.
    */
   @Deprecated
   public static boolean hasPermission(RpcController controller,
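
All the deprecations in this file point at the Admin-based replacements. A minimal sketch of that newer API (table and user names are placeholders; the boolean on grant is taken to mean "merge with existing permissions"):

    import java.util.List;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class AdminAclSketch {
      static void grantAndList(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          Permission perm = Permission.newBuilder(TableName.valueOf("t1"))
            .withActions(Permission.Action.READ).build();
          // true = merge with any permissions "bob" already holds
          admin.grant(new UserPermission("bob", perm), true);
          List<UserPermission> perms = admin.getUserPermissions(
            GetUserPermissionsRequest.newBuilder(TableName.valueOf("t1")).build());
          perms.forEach(System.out::println);
        }
      }
    }
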
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
index 0c9dc35d75d..39aa5d61497 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
@@ -55,7 +55,7 @@ public class VisibilityClient {
   /**
    * Return true if cell visibility features are supported and enabled
    * @param connection The connection to use
-   * @return true if cell visibility features are supported and enabled, false otherwise n
+   * @return true if cell visibility features are supported and enabled, false otherwise
    */
   public static boolean isCellVisibilityEnabled(Connection connection) throws IOException {
     return connection.getAdmin().getSecurityCapabilities()
@@ -63,8 +63,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Utility method for adding label to the system. nnnn * @deprecated Use
-   * {@link #addLabel(Connection,String)} instead.
+   * Utility method for adding a label to the system.
+   * @deprecated Use {@link #addLabel(Connection,String)} instead.
    */
   @Deprecated
   public static VisibilityLabelsResponse addLabel(Configuration conf, final String label)
@@ -75,7 +75,7 @@ public class VisibilityClient {
   }
 
   /**
-   * Utility method for adding label to the system. nnnn
+   * Utility method for adding a label to the system.
    */
   public static VisibilityLabelsResponse addLabel(Connection connection, final String label)
     throws Throwable {
@@ -83,8 +83,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Utility method for adding labels to the system. nnnn * @deprecated Use
-   * {@link #addLabels(Connection,String[])} instead.
+   * Utility method for adding labels to the system.
+   * @deprecated Use {@link #addLabels(Connection,String[])} instead.
    */
   @Deprecated
   public static VisibilityLabelsResponse addLabels(Configuration conf, final String[] labels)
@@ -95,7 +95,7 @@ public class VisibilityClient {
   }
 
   /**
-   * Utility method for adding labels to the system. nnnn
+   * Utility method for adding labels to the system.
    */
   public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels)
     throws Throwable {
@@ -133,8 +133,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Sets given labels globally authorized for the user. nnnnn * @deprecated Use
-   * {@link #setAuths(Connection,String[],String)} instead.
+   * Sets the given labels as globally authorized for the user.
+   * @deprecated Use {@link #setAuths(Connection,String[],String)} instead.
    */
   @Deprecated
   public static VisibilityLabelsResponse setAuths(Configuration conf, final String[] auths,
@@ -145,7 +145,7 @@ public class VisibilityClient {
   }
 
   /**
-   * Sets given labels globally authorized for the user. nnnnn
+   * Sets the given labels as globally authorized for the user.
    */
   public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths,
     final String user) throws Throwable {
@@ -153,8 +153,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Returns labels, the given user is globally authorized for. n * @deprecated Use
-   * {@link #getAuths(Connection,String)} instead.
+   * Returns the labels the given user is globally authorized for.
+   * @deprecated Use {@link #getAuths(Connection,String)} instead.
    */
   @Deprecated
   public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable {
@@ -216,7 +216,7 @@ public class VisibilityClient {
    * Retrieve the list of visibility labels defined in the system.
    * @param connection The Connection instance to use.
    * @param regex      The regular expression to filter which labels are returned.
-   * @return labels The list of visibility labels defined in the system. n
+   * @return labels The list of visibility labels defined in the system.
    */
   public static ListLabelsResponse listLabels(Connection connection, final String regex)
     throws Throwable {
@@ -252,8 +252,8 @@ public class VisibilityClient {
   }
 
   /**
-   * Removes given labels from user's globally authorized list of labels. nnnnn * @deprecated Use
-   * {@link #clearAuths(Connection,String[],String)} instead.
+   * Removes given labels from user's globally authorized list of labels.
+   * @deprecated Use {@link #clearAuths(Connection,String[],String)} instead.
    */
   @Deprecated
   public static VisibilityLabelsResponse clearAuths(Configuration conf, final String[] auths,
@@ -264,7 +264,7 @@ public class VisibilityClient {
   }
 
   /**
-   * Removes given labels from user's globally authorized list of labels. nnnnn
+   * Removes given labels from user's globally authorized list of labels.
    */
   public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths,
     final String user) throws Throwable {
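
The Connection-based variants compose into a short label-management flow. A minimal sketch (labels and the user name are placeholders; a null regex to listLabels is assumed to mean "no filtering"):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

    public class VisibilityExample {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          if (!VisibilityClient.isCellVisibilityEnabled(conn)) {
            return; // VisibilityController coprocessor is not installed
          }
          // Define labels, then authorize a user for a subset of them.
          VisibilityClient.addLabels(conn, new String[] { "secret", "confidential" });
          VisibilityClient.setAuths(conn, new String[] { "confidential" }, "bob");
          // Print every label currently defined in the system.
          System.out.println(VisibilityClient.listLabels(conn, null));
        }
      }
    }
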
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 1d296229fcf..1c1125f6aea 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -352,7 +352,7 @@ public final class ProtobufUtil {
    * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf
-   * references. n
+   * references.
    */
   public static IOException handleRemoteException(Exception e) {
     return makeIOExceptionOfException(e);
@@ -513,7 +513,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Get to a client Get
    * @param proto the protocol buffer Get to convert
-   * @return the converted client Get n
+   * @return the converted client Get
    */
   public static Get toGet(final ClientProtos.Get proto) throws IOException {
     if (proto == null) return null;
@@ -598,7 +598,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to a Put.
    * @param proto The protocol buffer MutationProto to convert
-   * @return A client Put. n
+   * @return A client Put.
    */
   public static Put toPut(final MutationProto proto) throws IOException {
     return toPut(proto, null);
@@ -608,7 +608,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Mutate to a Put.
    * @param proto       The protocol buffer MutationProto to convert
    * @param cellScanner If non-null, the Cell data that goes with this proto.
-   * @return A client Put. n
+   * @return A client Put.
    */
   public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -692,7 +692,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Mutate to a Delete
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted client Delete n
+   * @return the converted client Delete
    */
   public static Delete toDelete(final MutationProto proto) throws IOException {
     return toDelete(proto, null);
@@ -702,7 +702,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Mutate to a Delete
    * @param proto       the protocol buffer Mutate to convert
    * @param cellScanner if non-null, the data that goes with this delete.
-   * @return the converted client Delete n
+   * @return the converted client Delete
    */
   public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner)
     throws IOException {
@@ -871,7 +871,7 @@ public final class ProtobufUtil {
   /**
    * Convert a MutateRequest to Mutation
    * @param proto the protocol buffer Mutate to convert
-   * @return the converted Mutation n
+   * @return the converted Mutation
    */
   public static Mutation toMutation(final MutationProto proto) throws IOException {
     MutationType type = proto.getMutateType();
@@ -919,7 +919,7 @@ public final class ProtobufUtil {
   /**
    * Convert a client Scan to a protocol buffer Scan
    * @param scan the client Scan to convert
-   * @return the converted protocol buffer Scan n
+   * @return the converted protocol buffer Scan
    */
   public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
     ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
@@ -1016,7 +1016,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer Scan to a client Scan
    * @param proto the protocol buffer Scan to convert
-   * @return the converted client Scan n
+   * @return the converted client Scan
    */
   public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
     byte[] startRow = HConstants.EMPTY_START_ROW;
@@ -1141,7 +1141,7 @@ public final class ProtobufUtil {
   /**
    * Create a protocol buffer Get based on a client Get.
    * @param get the client Get
-   * @return a protocol buffer Get n
+   * @return a protocol buffer Get
    */
   public static ClientProtos.Get toGet(final Get get) throws IOException {
     ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
@@ -1207,7 +1207,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
+   * Create a protocol buffer Mutate based on a client Mutation
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutation(final MutationType type, final Mutation mutation,
     final long nonce) throws IOException {
@@ -1256,8 +1257,8 @@ public final class ProtobufUtil {
 
   /**
    * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
-   * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a
-   * protobuf'd Mutation n
+   * Understanding is that the Cell will be transported other than via protobuf.
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation,
     final MutationProto.Builder builder) throws IOException {
@@ -1266,8 +1267,8 @@ public final class ProtobufUtil {
 
   /**
    * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
-   * Understanding is that the Cell will be transported other than via protobuf. nn * @return a
-   * protobuf'd Mutation n
+   * Understanding is that the Cell will be transported other than via protobuf.
+   * @return a protobuf'd Mutation
    */
   public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation)
     throws IOException {
@@ -1293,8 +1294,8 @@ public final class ProtobufUtil {
 
   /**
    * Code shared by {@link #toMutation(MutationType, Mutation)} and
-   * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd
-   * Mutation.
+   * {@link #toMutationNoData(MutationType, Mutation)}
+   * @return A partly-filled out protobuf'd Mutation.
    */
   private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
     final Mutation mutation, MutationProto.Builder builder) {
@@ -1427,7 +1428,7 @@ public final class ProtobufUtil {
    * Convert a protocol buffer Result to a client Result
    * @param proto   the protocol buffer Result to convert
    * @param scanner Optional cell scanner.
-   * @return the converted client Result n
+   * @return the converted client Result
    */
   public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner)
     throws IOException {
@@ -1542,8 +1543,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer
-   * DeleteType n
+   * Convert a delete KeyValue type to protocol buffer DeleteType.
+   * @return protocol buffer DeleteType
    */
   public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
     switch (type) {
@@ -1563,7 +1564,7 @@ public final class ProtobufUtil {
   /**
    * Convert a protocol buffer DeleteType to delete KeyValue type.
    * @param type The DeleteType
-   * @return The type. n
+   * @return The type.
    */
   public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException {
     switch (type) {
@@ -1654,7 +1655,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to close a region given a region name using admin protocol. nnn
+   * A helper to close a region given a region name using admin protocol.
    */
   public static void closeRegion(final RpcController controller,
     final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName)
@@ -1669,7 +1670,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to warmup a region given a region name using admin protocol nn *
+   * A helper to warm up a region given a region name using admin protocol.
    */
   public static void warmupRegion(final RpcController controller,
     final AdminService.BlockingInterface admin,
@@ -1686,7 +1687,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to open a region using admin protocol. nnn
+   * A helper to open a region using admin protocol.
    */
   public static void openRegion(final RpcController controller,
     final AdminService.BlockingInterface admin, ServerName server,
@@ -1700,8 +1701,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to get the all the online regions on a region server using admin protocol. n * @return
-   * a list of online region info n
+   * A helper to get all the online regions on a region server using admin protocol.
+   * @return a list of online region info
    */
   public static List<org.apache.hadoop.hbase.client.RegionInfo>
     getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException {
@@ -2034,7 +2035,8 @@ public final class ProtobufUtil {
   /**
    * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to
    * NOT print out data, both because it can be big and because we do not want data in our logs. Use
-   * judiciously. n * @return toString of passed <code>m</code>
+   * judiciously.
+   * @return toString of passed <code>m</code>
    */
   public static String getShortTextFormat(Message m) {
     if (m == null) return "null";
@@ -2181,8 +2183,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
-   * client CellVisibility
+   * Convert a protocol buffer CellVisibility to a client CellVisibility
+   * @return the converted client CellVisibility
    */
   public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
     if (proto == null) return null;
@@ -2190,8 +2192,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
-   * converted client CellVisibility n
+   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+   * @return the converted client CellVisibility
    */
   public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
     if (protoBytes == null) return null;
@@ -2207,8 +2209,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
-   * protocol buffer CellVisibility
+   * Create a protocol buffer CellVisibility based on a client CellVisibility.
+   * @return a protocol buffer CellVisibility
    */
   public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
     ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
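
These CellVisibility converters are symmetric, so an expression survives a round trip. A small sketch (the expression string is arbitrary):

    import org.apache.hadoop.hbase.security.visibility.CellVisibility;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

    public class CellVisibilityRoundTrip {
      public static void main(String[] args) {
        CellVisibility cv = new CellVisibility("secret & !public");
        // Client expression -> protobuf message -> client expression.
        ClientProtos.CellVisibility proto = ProtobufUtil.toCellVisibility(cv);
        System.out.println(ProtobufUtil.toCellVisibility(proto).getExpression());
      }
    }
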
@@ -2217,8 +2219,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer Authorizations to a client Authorizations n * @return the converted
-   * client Authorizations
+   * Convert a protocol buffer Authorizations to a client Authorizations
+   * @return the converted client Authorizations
    */
   public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) {
     if (proto == null) return null;
@@ -2226,8 +2228,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer Authorizations bytes to a client Authorizations n * @return the
-   * converted client Authorizations n
+   * Convert a protocol buffer Authorizations bytes to a client Authorizations
+   * @return the converted client Authorizations
    */
   public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException {
     if (protoBytes == null) return null;
@@ -2243,8 +2245,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a protocol buffer Authorizations based on a client Authorizations. n * @return a
-   * protocol buffer Authorizations
+   * Create a protocol buffer Authorizations based on a client Authorizations.
+   * @return a protocol buffer Authorizations
    */
   public static ClientProtos.Authorizations toAuthorizations(Authorizations authorizations) {
     ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder();
@@ -2255,8 +2257,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer TimeUnit to a client TimeUnit n * @return the converted client
-   * TimeUnit
+   * Convert a protocol buffer TimeUnit to a client TimeUnit
+   * @return the converted client TimeUnit
    */
   public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) {
     switch (proto) {
@@ -2279,8 +2281,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a client TimeUnit to a protocol buffer TimeUnit n * @return the converted protocol
-   * buffer TimeUnit
+   * Convert a client TimeUnit to a protocol buffer TimeUnit
+   * @return the converted protocol buffer TimeUnit
    */
   public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) {
     switch (timeUnit) {
@@ -2303,8 +2305,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer ThrottleType to a client ThrottleType n * @return the converted
-   * client ThrottleType
+   * Convert a protocol buffer ThrottleType to a client ThrottleType
+   * @return the converted client ThrottleType
    */
   public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) {
     switch (proto) {
@@ -2332,8 +2334,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a client ThrottleType to a protocol buffer ThrottleType n * @return the converted
-   * protocol buffer ThrottleType
+   * Convert a client ThrottleType to a protocol buffer ThrottleType
+   * @return the converted protocol buffer ThrottleType
    */
   public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) {
     switch (type) {
@@ -2361,8 +2363,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer QuotaScope to a client QuotaScope n * @return the converted client
-   * QuotaScope
+   * Convert a protocol buffer QuotaScope to a client QuotaScope
+   * @return the converted client QuotaScope
    */
   public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) {
     switch (proto) {
@@ -2375,8 +2377,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a client QuotaScope to a protocol buffer QuotaScope n * @return the converted protocol
-   * buffer QuotaScope
+   * Convert a client QuotaScope to a protocol buffer QuotaScope
+   * @return the converted protocol buffer QuotaScope
    */
   public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) {
     switch (scope) {
@@ -2389,8 +2391,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a protocol buffer QuotaType to a client QuotaType n * @return the converted client
-   * QuotaType
+   * Convert a protocol buffer QuotaType to a client QuotaType
+   * @return the converted client QuotaType
    */
   public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) {
     switch (proto) {
@@ -2403,8 +2405,8 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a client QuotaType to a protocol buffer QuotaType n * @return the converted protocol
-   * buffer QuotaType
+   * Convert a client QuotaType to a protocol buffer QuotaType
+   * @return the converted protocol buffer QuotaType
    */
   public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) {
     switch (type) {
@@ -2531,7 +2533,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeDelimitedFrom avoids the hard-coded 64MB limit for decoding
    * buffers
    * @param builder current message builder
-   * @param in      Inputsream with delimited protobuf data n
+   * @param in      InputStream with delimited protobuf data
    */
   public static void mergeDelimitedFrom(Message.Builder builder, InputStream in)
     throws IOException {
@@ -2553,7 +2555,7 @@ public final class ProtobufUtil {
    * where the message size is known
    * @param builder current message builder
    * @param in      InputStream containing protobuf data
-   * @param size    known size of protobuf data n
+   * @param size    known size of protobuf data
    */
   public static void mergeFrom(Message.Builder builder, InputStream in, int size)
     throws IOException {
@@ -2567,7 +2569,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers
    * where the message size is not known
    * @param builder current message builder
-   * @param in      InputStream containing protobuf data n
+   * @param in      InputStream containing protobuf data
    */
   public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException {
     final CodedInputStream codedInput = CodedInputStream.newInstance(in);
@@ -2580,7 +2582,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with ByteStrings
    * @param builder current message builder
-   * @param bs      ByteString containing the n
+   * @param bs      ByteString containing the protobuf data
    */
   public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException {
     final CodedInputStream codedInput = bs.newCodedInput();
@@ -2593,7 +2595,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with byte arrays
    * @param builder current message builder
-   * @param b       byte array n
+   * @param b       byte array
    */
   public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
     final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -2606,7 +2608,7 @@ public final class ProtobufUtil {
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
    * working with byte arrays
    * @param builder current message builder
-   * @param b       byte array nnn
+   * @param b       byte array
    */
   public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length)
     throws IOException {
@@ -2778,7 +2780,7 @@ public final class ProtobufUtil {
 
   /**
    * Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state
-   * @param state the protobuf CompactionState n
+   * @param state the protobuf CompactionState
    */
   public static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) {
     return CompactionState.valueOf(state.toString());
@@ -2790,7 +2792,7 @@ public final class ProtobufUtil {
 
   /**
    * Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state
-   * @param state the protobuf CompactionState n
+   * @param state the protobuf CompactionState
    */
   public static CompactionState
     createCompactionStateForRegionLoad(RegionLoad.CompactionState state) {
@@ -2895,9 +2897,7 @@ public final class ProtobufUtil {
       stats.getCompactionPressure());
   }
 
-  /**
-   * n * @return A String version of the passed in <code>msg</code>
-   */
+  /** Returns a String version of the passed in <code>msg</code> */
   public static String toText(Message msg) {
     return TextFormat.shortDebugString(msg);
   }
@@ -2907,7 +2907,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. n
+   * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it.
    */
   public static <T> T call(Callable<T> callable) throws IOException {
     try {
@@ -3018,7 +3018,7 @@ public final class ProtobufUtil {
    *             magic and that is then followed by a protobuf that has a serialized
    *             {@link ServerName} in it.
    * @return Returns null if <code>data</code> is null else converts passed data to a ServerName
-   *         instance. n
+   *         instance.
    */
   public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException {
     if (data == null || data.length <= 0) return null;
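
The paired toScan methods are likewise symmetric. A minimal round-trip sketch (row keys and the caching value are arbitrary):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanRoundTrip {
      public static void main(String[] args) throws Exception {
        Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("row-000"))
          .withStopRow(Bytes.toBytes("row-999"))
          .setCaching(100);
        // Client Scan -> protobuf Scan -> client Scan.
        ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
        Scan back = ProtobufUtil.toScan(proto);
        System.out.println(Bytes.toString(back.getStartRow())); // row-000
      }
    }
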
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 73e080297a4..9bd29f37ff9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -193,7 +193,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for a client increment nnnnnn * @return a mutate request
+   * Create a protocol buffer MutateRequest for a client increment
+   * @return a mutate request
    */
   public static MutateRequest buildIncrementRequest(final byte[] regionName, final byte[] row,
     final byte[] family, final byte[] qualifier, final long amount, final Durability durability,
@@ -227,7 +228,7 @@ public final class RequestConverter {
 
   /**
    * Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append
-   * @return a mutate request n
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row,
     final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value,
@@ -247,7 +248,7 @@ public final class RequestConverter {
 
   /**
    * Create a protocol buffer MultiRequest for conditioned row mutations
-   * @return a multi request n
+   * @return a multi request
    */
   public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName,
     final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op,
@@ -304,7 +305,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for a put nn * @return a mutate request n
+   * Create a protocol buffer MutateRequest for a put
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put)
     throws IOException {
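
A minimal sketch of how the Put variant is used (row, family, qualifier and value are placeholders; regionName would come from a located region):

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MutateRequestSketch {
      static MutateRequest toRequest(byte[] regionName) throws IOException {
        Put put = new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        // Wrap the client Put in a protobuf MutateRequest aimed at one region.
        return RequestConverter.buildMutateRequest(regionName, put);
      }
    }
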
@@ -316,7 +318,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for an append nn * @return a mutate request n
+   * Create a protocol buffer MutateRequest for an append
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append,
     long nonceGroup, long nonce) throws IOException {
@@ -332,7 +335,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for a client increment nn * @return a mutate request
+   * Create a protocol buffer MutateRequest for a client increment
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment,
     final long nonceGroup, final long nonce) throws IOException {
@@ -348,7 +352,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MutateRequest for a delete nn * @return a mutate request n
+   * Create a protocol buffer MutateRequest for a delete
+   * @return a mutate request
    */
   public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete)
     throws IOException {
@@ -368,7 +373,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer ScanRequest for a client Scan nnnn * @return a scan request n
+   * Create a protocol buffer ScanRequest for a client Scan
+   * @return a scan request
    */
   public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int numberOfRows,
     boolean closeScanner) throws IOException {
@@ -388,7 +394,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer ScanRequest for a scanner id nnn * @return a scan request
+   * Create a protocol buffer ScanRequest for a scanner id
+   * @return a scan request
    */
   public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
     boolean trackMetrics) {
@@ -403,7 +410,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer ScanRequest for a scanner id nnnn * @return a scan request
+   * Create a protocol buffer ScanRequest for a scanner id
+   * @return a scan request
    */
   public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
     long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) {
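
A minimal sketch of the scanner-id variant, as used when pulling the next batch from an already-open scanner (the argument values are illustrative; -1 for limitOfRows is assumed to mean "no explicit limit"):

    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;

    public class ScanRequestSketch {
      static ScanRequest nextBatch(long scannerId, long nextCallSeq) {
        // Up to 100 more rows, keep the scanner open (closeScanner = false),
        // collect scan metrics, do not renew the lease.
        return RequestConverter.buildScanRequest(scannerId, 100, false,
          nextCallSeq, true, false, -1);
      }
    }
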
@@ -423,7 +431,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer bulk load request nnnnn * @return a bulk load request
+   * Create a protocol buffer bulk load request
+   * @return a bulk load request
    */
   public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
     final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -433,7 +442,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer bulk load request nnnnnn * @return a bulk load request
+   * Create a protocol buffer bulk load request
+   * @return a bulk load request
    */
   public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
     final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -490,7 +500,7 @@ public final class RequestConverter {
    * @param mutationBuilder     mutationBuilder to be used to build mutation.
    * @param nonceGroup          nonceGroup to be applied.
    * @param indexMap            Map of created RegionAction to the original index for a
-   *                            RowMutations/CheckAndMutate within the original list of actions n
+   *                            RowMutations/CheckAndMutate within the original list of actions
    */
   public static void buildRegionActions(final byte[] regionName, final List<Action> actions,
     final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder,
@@ -653,7 +663,7 @@ public final class RequestConverter {
    * @param mutationBuilder     mutationBuilder to be used to build mutation.
    * @param nonceGroup          nonceGroup to be applied.
    * @param indexMap            Map of created RegionAction to the original index for a
-   *                            RowMutations/CheckAndMutate within the original list of actions n
+   *                            RowMutations/CheckAndMutate within the original list of actions
    */
   public static void buildNoDataRegionActions(final byte[] regionName,
     final Iterable<Action> actions, final List<CellScannable> cells,
@@ -1021,7 +1031,8 @@ public final class RequestConverter {
   /**
    * Create a CompactRegionRequest for a given region name
    * @param regionName the name of the region to get info
-   * @param major      indicator if it is a major compaction n * @return a CompactRegionRequest
+   * @param major      whether to request a major compaction
+   * @return a CompactRegionRequest
    */
   public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
     byte[] columnFamily) {
@@ -1079,7 +1090,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer AddColumnRequest nn * @return an AddColumnRequest
+   * Create a protocol buffer AddColumnRequest
+   * @return an AddColumnRequest
    */
   public static AddColumnRequest buildAddColumnRequest(final TableName tableName,
     final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -1092,7 +1104,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer DeleteColumnRequest nn * @return a DeleteColumnRequest
+   * Create a protocol buffer DeleteColumnRequest
+   * @return a DeleteColumnRequest
    */
   public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName,
     final byte[] columnName, final long nonceGroup, final long nonce) {
@@ -1105,7 +1118,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer ModifyColumnRequest nn * @return an ModifyColumnRequest
+   * Create a protocol buffer ModifyColumnRequest
+   * @return a ModifyColumnRequest
    */
   public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName,
     final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -1131,7 +1145,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer MoveRegionRequest nn * @return A MoveRegionRequest
+   * Create a protocol buffer MoveRegionRequest
+   * @return A MoveRegionRequest
    */
   public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
     ServerName destServerName) {
@@ -1172,7 +1187,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Create a protocol buffer AssignRegionRequest n * @return an AssignRegionRequest
+   * Create a protocol buffer AssignRegionRequest
+   * @return an AssignRegionRequest
    */
   public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) {
     AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder();
@@ -1181,7 +1197,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer UnassignRegionRequest n * @return an UnassignRegionRequest
+   * Creates a protocol buffer UnassignRegionRequest
+   * @return an UnassignRegionRequest
    */
   public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) {
     UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
@@ -1190,7 +1207,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer OfflineRegionRequest n * @return an OfflineRegionRequest
+   * Creates a protocol buffer OfflineRegionRequest
+   * @return an OfflineRegionRequest
    */
   public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) {
     OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder();
@@ -1199,7 +1217,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest
+   * Creates a protocol buffer DeleteTableRequest
+   * @return a DeleteTableRequest
    */
   public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName,
     final long nonceGroup, final long nonce) {
@@ -1227,7 +1246,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer EnableTableRequest n * @return an EnableTableRequest
+   * Creates a protocol buffer EnableTableRequest
+   * @return an EnableTableRequest
    */
   public static EnableTableRequest buildEnableTableRequest(final TableName tableName,
     final long nonceGroup, final long nonce) {
@@ -1239,7 +1259,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer DisableTableRequest n * @return a DisableTableRequest
+   * Creates a protocol buffer DisableTableRequest
+   * @return a DisableTableRequest
    */
   public static DisableTableRequest buildDisableTableRequest(final TableName tableName,
     final long nonceGroup, final long nonce) {
@@ -1251,7 +1272,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer CreateTableRequest nn * @return a CreateTableRequest
+   * Creates a protocol buffer CreateTableRequest
+   * @return a CreateTableRequest
    */
   public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor,
     final byte[][] splitKeys, final long nonceGroup, final long nonce) {
@@ -1268,7 +1290,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer ModifyTableRequest nn * @return a ModifyTableRequest
+   * Creates a protocol buffer ModifyTableRequest
+   * @return a ModifyTableRequest
    */
   public static ModifyTableRequest buildModifyTableRequest(final TableName tableName,
     final TableDescriptor tableDesc, final long nonceGroup, final long nonce) {
@@ -1292,7 +1315,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer GetSchemaAlterStatusRequest n * @return a GetSchemaAlterStatusRequest
+   * Creates a protocol buffer GetSchemaAlterStatusRequest
+   * @return a GetSchemaAlterStatusRequest
    */
   public static GetSchemaAlterStatusRequest
     buildGetSchemaAlterStatusRequest(final TableName tableName) {
@@ -1302,7 +1326,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer GetTableDescriptorsRequest n * @return a GetTableDescriptorsRequest
+   * Creates a protocol buffer GetTableDescriptorsRequest
+   * @return a GetTableDescriptorsRequest
    */
   public static GetTableDescriptorsRequest
     buildGetTableDescriptorsRequest(final List<TableName> tableNames) {
@@ -1409,7 +1434,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer SetBalancerRunningRequest nn * @return a SetBalancerRunningRequest
+   * Creates a protocol buffer SetBalancerRunningRequest
+   * @return a SetBalancerRunningRequest
    */
   public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on,
     boolean synchronous) {
@@ -1494,8 +1520,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a request for querying the master the last flushed sequence Id for a region n * @return
-   * A {@link GetLastFlushedSequenceIdRequest}
+   * Creates a request for querying the master for the last flushed sequence Id of a region
+   * @return A {@link GetLastFlushedSequenceIdRequest}
    */
   public static GetLastFlushedSequenceIdRequest
     buildGetLastFlushedSequenceIdRequest(byte[] regionName) {
@@ -1546,7 +1572,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer SetNormalizerRunningRequest n * @return a SetNormalizerRunningRequest
+   * Creates a protocol buffer SetNormalizerRunningRequest
+   * @return a SetNormalizerRunningRequest
    */
   public static SetNormalizerRunningRequest buildSetNormalizerRunningRequest(boolean on) {
     return SetNormalizerRunningRequest.newBuilder().setOn(on).build();
@@ -1654,7 +1681,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer CreateNamespaceRequest n * @return a CreateNamespaceRequest
+   * Creates a protocol buffer CreateNamespaceRequest
+   * @return a CreateNamespaceRequest
    */
   public static CreateNamespaceRequest
     buildCreateNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1664,7 +1692,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer ModifyNamespaceRequest n * @return a ModifyNamespaceRequest
+   * Creates a protocol buffer ModifyNamespaceRequest
+   * @return a ModifyNamespaceRequest
    */
   public static ModifyNamespaceRequest
     buildModifyNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1674,7 +1703,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer DeleteNamespaceRequest n * @return a DeleteNamespaceRequest
+   * Creates a protocol buffer DeleteNamespaceRequest
+   * @return a DeleteNamespaceRequest
    */
   public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String name) {
     DeleteNamespaceRequest.Builder builder = DeleteNamespaceRequest.newBuilder();
@@ -1683,8 +1713,8 @@ public final class RequestConverter {
   }
 
   /**
-   * Creates a protocol buffer GetNamespaceDescriptorRequest n * @return a
-   * GetNamespaceDescriptorRequest
+   * Creates a protocol buffer GetNamespaceDescriptorRequest
+   * @return a GetNamespaceDescriptorRequest
    */
   public static GetNamespaceDescriptorRequest
     buildGetNamespaceDescriptorRequest(final String name) {
@@ -1801,7 +1831,7 @@ public final class RequestConverter {
 
   /**
    * Creates IsSnapshotCleanupEnabledRequest to determine if auto snapshot cleanup based on TTL
-   * expiration is turned on n
+   * expiration is turned on
    */
   public static IsSnapshotCleanupEnabledRequest buildIsSnapshotCleanupEnabledRequest() {
     return IsSnapshotCleanupEnabledRequest.newBuilder().build();
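A usage sketch for the request builders above, assuming only the signatures visible in
this diff; the generated request classes live under the shaded protobuf package, so
treat the exact import paths as assumptions:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
    import org.apache.hadoop.hbase.util.Bytes;

    // Move a region, identified by its encoded name, to a destination server.
    MoveRegionRequest move = RequestConverter.buildMoveRegionRequest(
      Bytes.toBytes("1588230740"), ServerName.valueOf("rs1.example.com,16020,1234567890"));

    // Delete a table; the nonce pair makes the operation idempotent across retries.
    long nonceGroup = 1L, nonce = 1L;
    DeleteTableRequest delete =
      RequestConverter.buildDeleteTableRequest(TableName.valueOf("test"), nonceGroup, nonce);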
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index 0a81db1cfb8..180698864fd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -90,7 +90,7 @@ public final class ResponseConverter {
    * @param request  the original protocol buffer MultiRequest
    * @param response the protocol buffer MultiResponse to convert
    * @param cells    Cells to go with the passed in <code>proto</code>. Can be null.
-   * @return the results that were in the MultiResponse (a Result or an Exception). n
+   * @return the results that were in the MultiResponse (a Result or an Exception).
    */
   public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
     final MultiResponse response, final CellScanner cells) throws IOException {
@@ -103,7 +103,7 @@ public final class ResponseConverter {
    * @param indexMap Used to support RowMutations/CheckAndMutate in batch
    * @param response the protocol buffer MultiResponse to convert
    * @param cells    Cells to go with the passed in <code>proto</code>. Can be null.
-   * @return the results that were in the MultiResponse (a Result or an Exception). n
+   * @return the results that were in the MultiResponse (a Result or an Exception).
    */
   public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
     final Map<Integer, Integer> indexMap, final MultiResponse response, final CellScanner cells)
@@ -265,7 +265,8 @@ public final class ResponseConverter {
   }
 
   /**
-   * Wrap a throwable to an action result. n * @return an action result builder
+   * Wrap a throwable to an action result.
+   * @return an action result builder
    */
   public static ResultOrException.Builder buildActionResult(final Throwable t) {
     ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -274,7 +275,8 @@ public final class ResponseConverter {
   }
 
   /**
-   * Wrap a throwable to an action result. n * @return an action result builder
+   * Wrap a throwable to an action result.
+   * @return an action result builder
    */
   public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) {
     ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -282,9 +284,7 @@ public final class ResponseConverter {
     return builder;
   }
 
-  /**
-   * n * @return NameValuePair of the exception name to stringified version os exception.
-   */
+  /** Returns NameBytesPair of the exception name to the stringified version of the exception. */
   public static NameBytesPair buildException(final Throwable t) {
     NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
     parameterBuilder.setName(t.getClass().getName());
@@ -325,7 +325,8 @@ public final class ResponseConverter {
   }
 
   /**
-   * A utility to build a GetServerInfoResponse. nn * @return the response
+   * A utility to build a GetServerInfoResponse.
+   * @return the response
    */
   public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName,
     final int webuiPort) {
@@ -340,7 +341,8 @@ public final class ResponseConverter {
   }
 
   /**
-   * A utility to build a GetOnlineRegionResponse. n * @return the response
+   * A utility to build a GetOnlineRegionResponse.
+   * @return the response
    */
   public static GetOnlineRegionResponse
     buildGetOnlineRegionResponse(final List<RegionInfo> regions) {
@@ -424,7 +426,7 @@ public final class ResponseConverter {
   }
 
   /**
-   * Create Results from the cells using the cells meta data. nnn
+   * Create Results from the cells using the cells' metadata.
    */
   public static Result[] getResults(CellScanner cellScanner, ScanResponse response)
     throws IOException {
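A small sketch of the action-result wrappers above, roughly as the RPC layer uses them
when assembling a MultiResponse; ResultOrException and NameBytesPair are the shaded
generated protobuf types, and setIndex on the builder is an assumption:

    // Wrap a server-side failure so it can travel back as a per-action result.
    Throwable failure = new java.io.IOException("region is not online");
    ResultOrException.Builder roe = ResponseConverter.buildActionResult(failure);
    roe.setIndex(0); // position of the failed action within the multi request

    // Or capture just the exception class name plus its stringified form.
    NameBytesPair pair = ResponseConverter.buildException(failure);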
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
index 091515c325e..2787b5ab7f9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
@@ -91,7 +91,7 @@ public class Writables {
    * @return The passed Writable after its readFields has been called fed by the passed
    *         <code>bytes</code> array or IllegalArgumentException if passed null or an empty
    *         <code>bytes</code> array.
-   * @throws IOException e n
+   * @throws IOException e
    */
   public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException {
     return getWritable(bytes, 0, bytes.length, w);
@@ -107,7 +107,7 @@ public class Writables {
    * @return The passed Writable after its readFields has been called fed by the passed
    *         <code>bytes</code> array or IllegalArgumentException if passed null or an empty
    *         <code>bytes</code> array.
-   * @throws IOException e n
+   * @throws IOException e
    */
   public static Writable getWritable(final byte[] bytes, final int offset, final int length,
     final Writable w) throws IOException {
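A round-trip sketch for getWritable: serialize any Writable the standard Hadoop way,
then rehydrate a fresh instance from the bytes (Text is used purely for illustration):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.hbase.util.Writables;
    import org.apache.hadoop.io.Text;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    new Text("hello").write(new DataOutputStream(baos)); // standard Writable serialization
    // getWritable feeds readFields() from the byte array and returns the same instance.
    Text copy = (Text) Writables.getWritable(baos.toByteArray(), new Text());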
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
index 4208ea113e4..4811da28776 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
@@ -148,7 +148,7 @@ public class ZNodePaths {
 
   /**
    * Parses the meta replicaId from the passed path.
-   * @param path the name of the full path which includes baseZNode. n
+   * @param path the name of the full path which includes baseZNode.
    */
   public int getMetaReplicaIdFromPath(String path) {
     // Extract the znode from path. The prefix is of the following format.
@@ -159,7 +159,7 @@ public class ZNodePaths {
 
   /**
    * Parse the meta replicaId from the passed znode
-   * @param znode the name of the znode, does not include baseZNode n
+   * @param znode the name of the znode, does not include baseZNode
    */
   public int getMetaReplicaIdFromZNode(String znode) {
     return znode.equals(metaZNodePrefix)
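A usage sketch for the replicaId parsers, assuming the default meta znode prefix
"meta-region-server" and the "-<id>" suffix convention for non-default replicas; the
literals are illustrative:

    ZNodePaths paths = new ZNodePaths(HBaseConfiguration.create());
    int defaultReplica = paths.getMetaReplicaIdFromZNode("meta-region-server");   // 0
    int firstReplica = paths.getMetaReplicaIdFromZNode("meta-region-server-1");   // 1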
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 77a3b838a9b..e6b27d53d34 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -212,7 +212,7 @@ public class TestClientNoCluster extends Configured implements Tool {
   }
 
   /**
-   * Test that operation timeout prevails over rpc default timeout and retries, etc. n
+   * Test that operation timeout prevails over rpc default timeout and retries, etc.
    */
   @Test
   public void testRpcTimeout() throws IOException {
@@ -600,8 +600,8 @@ public class TestClientNoCluster extends Configured implements Tool {
   }
 
   /**
-   * @param name region name or encoded region name. n * @return True if we are dealing with a
-   *             hbase:meta region.
+   * @param name region name or encoded region name.
+   * @return True if we are dealing with a hbase:meta region.
    */
   static boolean isMetaRegion(final byte[] name, final RegionSpecifierType type) {
     switch (type) {
@@ -656,8 +656,9 @@ public class TestClientNoCluster extends Configured implements Tool {
 
   /**
    * Format passed integer. Zero-pad. Copied from hbase-server PE class and small amendment. Make
-   * them share. n * @return Returns zero-prefixed 10-byte wide decimal version of passed number
-   * (Does absolute in case number is negative).
+   * them share.
+   * @return zero-prefixed 10-byte wide decimal version of the passed number (takes the absolute
+   *         value in case the number is negative).
    */
   private static byte[] format(final long number) {
     byte[] b = new byte[10];
@@ -669,9 +670,7 @@ public class TestClientNoCluster extends Configured implements Tool {
     return b;
   }
 
-  /**
-   * nn * @return <code>count</code> regions
-   */
+  /** Returns <code>count</code> regions */
   private static HRegionInfo[] makeHRegionInfos(final byte[] tableName, final int count,
     final long namespaceSpan) {
     byte[] startKey = HConstants.EMPTY_BYTE_ARRAY;
@@ -691,9 +690,7 @@ public class TestClientNoCluster extends Configured implements Tool {
     return hris;
   }
 
-  /**
-   * n * @return Return <code>count</code> servernames.
-   */
+  /** Returns <code>count</code> servernames. */
   private static ServerName[] makeServerNames(final int count) {
     ServerName[] sns = new ServerName[count];
     for (int i = 0; i < count; i++) {
@@ -736,7 +733,7 @@ public class TestClientNoCluster extends Configured implements Tool {
   }
 
   /**
-   * Code for each 'client' to run. nnnn
+   * Code for each 'client' to run.
    */
   static void cycle(int id, final Configuration c, final Connection sharedConnection)
     throws IOException {
@@ -853,7 +850,7 @@ public class TestClientNoCluster extends Configured implements Tool {
 
   /**
    * Run a client instance against a faked up server.
-   * @param args TODO n
+   * @param args TODO
    */
   public static void main(String[] args) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new TestClientNoCluster(), args));
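The zero-padded format described above is easy to reproduce; a minimal plain-Java
equivalent (the in-tree version fills a byte[10] directly, and the Math.abs overflow on
Long.MIN_VALUE is ignored for this sketch):

    // Zero-prefixed, 10-digit-wide decimal rendering of the absolute value.
    static byte[] format(final long number) {
      return String.format("%010d", Math.abs(number)).getBytes();
    }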
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
index cc329cd3d03..cce3ba4e4e3 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
@@ -42,7 +42,7 @@ public class TestDeleteTimeStamp {
   private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier");
 
   /*
-   * Test for verifying that the timestamp in delete object is being honored. n
+   * Test for verifying that the timestamp in the delete object is being honored.
    */
   @Test
   public void testTimeStamp() {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index 27cf51e7c9f..d7eef52a4f9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -100,7 +100,8 @@ public final class AuthUtil {
   /**
    * For kerberized cluster, return login user (from kinit or from keytab if specified). For
    * non-kerberized cluster, return system user.
-   * @param conf configuartion file n * @throws IOException login exception
+   * @param conf configuration file
+   * @throws IOException login exception
    */
   @InterfaceAudience.Private
   public static User loginClient(Configuration conf) throws IOException {
@@ -160,7 +161,8 @@ public final class AuthUtil {
    * <p>
    * NOT recommend to use to method unless you're sure what you're doing, it is for canary only.
    * Please use User#loginClient.
-   * @param conf configuration file n * @throws IOException login exception
+   * @param conf configuration file
+   * @throws IOException login exception
    */
   private static User loginClientAsService(Configuration conf) throws IOException {
     UserProvider provider = UserProvider.instantiate(conf);
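Typical client-side use of the login helper above, on either a kerberized or a
non-kerberized cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.AuthUtil;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.User;

    Configuration conf = HBaseConfiguration.create();
    // Kerberized: picks up the kinit/keytab login; otherwise returns the system user.
    User user = AuthUtil.loginClient(conf);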
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
index e5050b864ca..a29a98a8c09 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
@@ -54,7 +54,7 @@ public class ByteBufferKeyOnlyKeyValue extends ByteBufferExtendedCell {
 
   /**
    * A setter that helps to avoid object creation every time and whenever there is a need to create
-   * new OffheapKeyOnlyKeyValue. nnn
+   * new OffheapKeyOnlyKeyValue.
    */
   public void setKey(ByteBuffer key, int offset, int length) {
     setKey(key, offset, length, ByteBufferUtils.toShort(key, offset));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
index 28128ee37c6..677ed2295ce 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
@@ -48,7 +48,7 @@ public interface CellBuilder {
   Cell build();
 
   /**
-   * Remove all internal elements from builder. n
+   * Remove all internal elements from builder.
    */
   CellBuilder clear();
 }
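For reference, builders are obtained from CellBuilderFactory in the same package; a
short sketch using the standard 2.x builder methods (conventional usage, not shown in
this diff):

    CellBuilder builder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
    Cell cell = builder
      .setRow(Bytes.toBytes("row1"))
      .setFamily(Bytes.toBytes("cf"))
      .setQualifier(Bytes.toBytes("q"))
      .setType(Cell.Type.Put)
      .setValue(Bytes.toBytes("v"))
      .build();
    builder.clear(); // remove all internal elements before reusing the builder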
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index b90eab3a4c2..aebf654a832 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -662,9 +662,8 @@ public class CellComparatorImpl implements CellComparator {
   /**
    * Compares the row part of the cell with a simple plain byte[] like the stopRow in Scan. This
    * should be used with context where for hbase:meta cells the
-   * {{@link MetaCellComparator#META_COMPARATOR} should be used n * the cell to be compared n * the
-   * kv serialized byte[] to be compared with n * the offset in the byte[] n * the length in the
-   * byte[]
+   * {@link MetaCellComparator#META_COMPARATOR} should be used. Takes the cell to be compared, the
+   * kv serialized byte[] to be compared with, and the offset and length in that byte[].
    * @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger than byte[], -1
    *         otherwise
    */
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index b518b0e3aa2..56af3c58e89 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -584,9 +584,7 @@ public final class CellUtil {
     return PrivateCellUtil.createCell(cell, value, tags);
   }
 
-  /**
-   * Return CellScanner interface over <code>cellIterables</code>
-   */
+  /** Returns CellScanner interface over <code>cellScannerables</code> */
   public static CellScanner
     createCellScanner(final List<? extends CellScannable> cellScannerables) {
     return new CellScanner() {
@@ -612,16 +610,14 @@ public final class CellUtil {
     };
   }
 
-  /**
-   * Return CellScanner interface over <code>cellIterable</code>
-   */
+  /** Returns CellScanner interface over <code>cellIterable</code> */
   public static CellScanner createCellScanner(final Iterable<Cell> cellIterable) {
     if (cellIterable == null) return null;
     return createCellScanner(cellIterable.iterator());
   }
 
   /**
-   * Return CellScanner interface over <code>cellIterable</code> or null if <code>cells</code> is
+   * Returns CellScanner interface over <code>cells</code> or null if <code>cells</code> is
    * null
    */
   public static CellScanner createCellScanner(final Iterator<Cell> cells) {
@@ -644,9 +640,7 @@ public final class CellUtil {
     };
   }
 
-  /**
-   * Return CellScanner interface over <code>cellArray</code>
-   */
+  /** Returns CellScanner interface over <code>cellArray</code> */
   public static CellScanner createCellScanner(final Cell[] cellArray) {
     return new CellScanner() {
       private final Cell[] cells = cellArray;
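All of the createCellScanner overloads above are consumed with the same idiom:
advance() positions the scanner and current() reads the Cell:

    CellScanner scanner = CellUtil.createCellScanner(cellArray);
    while (scanner.advance()) { // advance() may throw IOException
      Cell c = scanner.current();
      // ... process c ...
    }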
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
index ddbf71cac13..432556d2642 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
@@ -145,7 +145,7 @@ public class CompoundConfiguration extends Configuration {
   /**
    * Add Bytes map to config list. This map is generally created by HTableDescriptor or
    * HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous
-   * ones if there are name collisions. n * Bytes map
+   * ones if there are name collisions. The <code>map</code> argument is a Bytes map.
    * @return this, for builder pattern
    */
   public CompoundConfiguration addBytesMap(final Map<Bytes, Bytes> map) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
index b3b7a1c5e57..28e648ec466 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
@@ -41,7 +41,7 @@ public interface ExtendedCell extends RawCell, HeapSize {
    * &lt;tags&gt;</code>
    * @param out      Stream to which cell has to be written
    * @param withTags Whether to write tags.
-   * @return how many bytes are written. n
+   * @return how many bytes are written.
    */
   // TODO remove the boolean param once HBASE-16706 is done.
   default int write(OutputStream out, boolean withTags) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
index 62df02c71de..ebee82a437f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
@@ -235,7 +235,7 @@ public class HBaseConfiguration extends Configuration {
    * @param conf    configuration instance for accessing the passwords
    * @param alias   the name of the password element
    * @param defPass the default password
-   * @return String password or default password n
+   * @return String password or default password
    */
   public static String getPassword(Configuration conf, String alias, String defPass)
     throws IOException {
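A usage sketch for getPassword; the alias name here is hypothetical, and the default is
returned when no credential provider supplies the alias:

    Configuration conf = HBaseConfiguration.create();
    String password =
      HBaseConfiguration.getPassword(conf, "my.keystore.password.alias", "changeit");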
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index d7c6e5f455e..46543ca9197 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1840,8 +1840,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
      * Compare columnFamily, qualifier, timestamp, and key type (everything except the row). This
      * method is used both in the normal comparator and the "same-prefix" comparator. Note that we
      * are assuming that row portions of both KVs have already been parsed and found identical, and
-     * we don't validate that assumption here. n * the length of the common prefix of the two
-     * key-values being compared, including row length and row
+     * we don't validate that assumption here. <code>commonPrefix</code> is the length of the common
+     * prefix of the two key-values being compared, including the row length and row.
      */
     private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength,
       byte[] right, int roffset, int rlength, short rowlength) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
index 4291d904fe8..ed3687e9ed4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
@@ -55,8 +55,8 @@ public class KeyValueTestUtil {
 
   /**
    * Checks whether KeyValues from kvCollection2 are contained in kvCollection1. The comparison is
-   * made without distinguishing MVCC version of the KeyValues nn * @return true if KeyValues from
-   * kvCollection2 are contained in kvCollection1
+   * made without distinguishing MVCC version of the KeyValues
+   * @return true if KeyValues from kvCollection2 are contained in kvCollection1
    */
   public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1,
     Collection<? extends Cell> kvCollection2) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index cb75a1f66a9..b1b59af7bf3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -58,8 +58,8 @@ public class KeyValueUtil {
 
   /**
    * Returns number of bytes this cell's key part would have been used if serialized as in
-   * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type. n * @return the
-   * key length
+   * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type.
+   * @return the key length
    */
   public static int keyLength(final Cell cell) {
     return keyLength(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength());
@@ -96,8 +96,8 @@ public class KeyValueUtil {
   }
 
   /**
-   * The position will be set to the beginning of the new ByteBuffer n * @return the Bytebuffer
-   * containing the key part of the cell
+   * The position will be set to the beginning of the new ByteBuffer
+   * @return the ByteBuffer containing the key part of the cell
    */
   public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
     byte[] bytes = new byte[keyLength(cell)];
@@ -107,8 +107,8 @@ public class KeyValueUtil {
   }
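A short sketch tying the key-part helpers together; the KeyValue constructor used here
is the standard row/family/qualifier/timestamp/type/value form:

    KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"),
      1L, KeyValue.Type.Put, Bytes.toBytes("v"));
    int klen = KeyValueUtil.keyLength(kv);                        // rowkey+cf+qualifier+ts+type
    ByteBuffer keyOnly = KeyValueUtil.copyKeyToNewByteBuffer(kv); // positioned at the start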
 
   /**
-   * Copies the key to a new KeyValue n * @return the KeyValue that consists only the key part of
-   * the incoming cell
+   * Copies the key to a new KeyValue
+   * @return the KeyValue that consists of only the key part of the incoming cell
    */
   public static KeyValue toNewKeyCell(final Cell cell) {
     byte[] bytes = new byte[keyLength(cell)];
@@ -203,7 +203,7 @@ public class KeyValueUtil {
 
   /**
    * Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
-   * position to the start of the next KeyValue. Does not allocate a new array or copy data. nnn
+   * position to the start of the next KeyValue. Does not allocate a new array or copy data.
    */
   public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
     boolean includesTags) {
@@ -236,7 +236,8 @@ public class KeyValueUtil {
 
   /**
    * Decrement the timestamp. For tests (currently wasteful) Remember timestamps are sorted reverse
-   * chronologically. n * @return previous key
+   * chronologically.
+   * @return previous key
    */
   public static KeyValue previousKey(final KeyValue in) {
     return createFirstOnRow(CellUtil.cloneRow(in), CellUtil.cloneFamily(in),
@@ -246,9 +247,8 @@ public class KeyValueUtil {
   /**
    * Create a KeyValue for the specified row, family and qualifier that would be larger than or
    * equal to all other possible KeyValues that have the same row, family, qualifier. Used for
-   * reseeking. Should NEVER be returned to a client. n * row key n * row offset n * row length n *
-   * family name n * family offset n * family length n * column qualifier n * qualifier offset n *
-   * qualifier length
+   * reseeking. Should NEVER be returned to a client. Takes the row key, family name and column
+   * qualifier, each with its offset and length.
    * @return Last possible key on passed row, family, qualifier.
    */
   public static KeyValue createLastOnRow(final byte[] row, final int roffset, final int rlength,
@@ -408,11 +408,11 @@ public class KeyValueUtil {
 
   /*************** misc **********************************/
   /**
-   * n * @return <code>cell</code> if it is an object of class {@link KeyValue} else we will return
-   * a new {@link KeyValue} instance made from <code>cell</code> Note: Even if the cell is an object
-   * of any of the subclass of {@link KeyValue}, we will create a new {@link KeyValue} object
-   * wrapping same buffer. This API is used only with MR based tools which expect the type to be
-   * exactly KeyValue. That is the reason for doing this way.
+   * @return <code>cell</code> if it is an object of class {@link KeyValue}, else a new
+   *         {@link KeyValue} instance made from <code>cell</code>. Note: even if the cell is an
+   *         object of any subclass of {@link KeyValue}, we will create a new {@link KeyValue}
+   *         object wrapping the same buffer. This API is used only with MR based tools which
+   *         expect the type to be exactly KeyValue. That is the reason for doing it this way.
    * @deprecated without any replacement.
    */
   @Deprecated
@@ -444,8 +444,9 @@ public class KeyValueUtil {
   }
 
   /**
-   * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. nn
-   * * @return Length written on stream n * @see #create(DataInput) for the inverse function
+   * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
+   * @return Length written on stream
+   * @see #create(DataInput) for the inverse function
    */
   public static long write(final KeyValue kv, final DataOutput out) throws IOException {
     // This is how the old Writables write used to serialize KVs. Need to figure
@@ -639,7 +640,7 @@ public class KeyValueUtil {
    * @param in       inputStream to read.
    * @param withTags whether the keyvalue should include tags are not
    * @return Created KeyValue OR if we find a length of zero, we will return null which can be
-   *         useful marking a stream as done. n
+   *         useful for marking a stream as done.
    */
   public static KeyValue createKeyValueFromInputStream(InputStream in, boolean withTags)
     throws IOException {
@@ -663,24 +664,24 @@ public class KeyValueUtil {
   }
 
   /**
-   * n * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
-   * hfile index members to KeyValues.
+   * Returns a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+   * index members to KeyValues.
    */
   public static KeyValue createKeyValueFromKey(final byte[] b) {
     return createKeyValueFromKey(b, 0, b.length);
   }
 
   /**
-   * n * @return A KeyValue made of a byte buffer that holds the key-only part. Needed to convert
-   * hfile index members to KeyValues.
+   * Returns a KeyValue made of a byte buffer that holds the key-only part. Needed to convert hfile
+   * index members to KeyValues.
    */
   public static KeyValue createKeyValueFromKey(final ByteBuffer bb) {
     return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit());
   }
 
   /**
-   * nnn * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
-   * hfile index members to KeyValues.
+   * Returns a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+   * index members to KeyValues.
    */
   public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
     byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
@@ -691,19 +692,19 @@ public class KeyValueUtil {
   }
 
   /**
-   * n * Where to read bytes from. Creates a byte array to hold the KeyValue backing bytes copied
-   * from the steam.
+   * Where to read bytes from: creates a byte array to hold the KeyValue backing bytes copied from
+   * the stream.
    * @return KeyValue created by deserializing from <code>in</code> OR if we find a length of zero,
-   *         we will return null which can be useful marking a stream as done. n
+   *         we will return null which can be useful for marking a stream as done.
    */
   public static KeyValue create(final DataInput in) throws IOException {
     return create(in.readInt(), in);
   }
 
   /**
-   * Create a KeyValue reading <code>length</code> from <code>in</code> nn * @return Created
-   * KeyValue OR if we find a length of zero, we will return null which can be useful marking a
-   * stream as done. n
+   * Create a KeyValue reading <code>length</code> from <code>in</code>
+   * @return Created KeyValue OR if we find a length of zero, we will return null which can be
+   *         useful for marking a stream as done.
    */
   public static KeyValue create(int length, final DataInput in) throws IOException {
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
index c34c8d5a298..83de4312cc8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
@@ -1019,7 +1019,7 @@ public final class PrivateCellUtil {
    * Writes the row from the given cell to the output stream excluding the common prefix
    * @param out     The dataoutputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param rlength the row length n
+   * @param rlength the row length
    */
   public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength,
     int commonPrefix) throws IOException {
@@ -1207,7 +1207,6 @@ public final class PrivateCellUtil {
 
   /**
    * Compares only the key portion of a cell. It does not include the sequence id/mvcc of the cell
-   * nn
    * @return an int greater than 0 if left &gt; than right lesser than 0 if left &lt; than right
    *         equal to 0 if left is equal to right
    */
@@ -2168,7 +2167,7 @@ public final class PrivateCellUtil {
   /**
    * Writes the Cell's key part as it would have serialized in a KeyValue. The format is &lt;2 bytes
    * rk len&gt;&lt;rk&gt;&lt;1 byte cf len&gt;&lt;cf&gt;&lt;qualifier&gt;&lt;8 bytes
-   * timestamp&gt;&lt;1 byte type&gt; nnn
+   * timestamp&gt;&lt;1 byte type&gt;
    */
   public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
     short rowLen = cell.getRowLength();
@@ -2200,7 +2199,7 @@ public final class PrivateCellUtil {
   /**
    * Deep clones the given cell if the cell supports deep cloning
    * @param cell the cell to be cloned
-   * @return the cloned cell n
+   * @return the cloned cell
    */
   public static Cell deepClone(Cell cell) throws CloneNotSupportedException {
     if (cell instanceof ExtendedCell) {
@@ -2214,7 +2213,7 @@ public final class PrivateCellUtil {
    * @param cell     the cell to be written
    * @param out      the outputstream
    * @param withTags if tags are to be written or not
-   * @return the total bytes written n
+   * @return the total bytes written
    */
   public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException {
     if (cell instanceof ExtendedCell) {
@@ -2289,8 +2288,8 @@ public final class PrivateCellUtil {
 
   /**
    * Sets the given seqId to the cell. Marked as audience Private as of 1.2.0. Setting a Cell
-   * sequenceid is an internal implementation detail not for general public use. nn * @throws
-   * IOException when the passed cell is not of type {@link ExtendedCell}
+   * sequenceid is an internal implementation detail not for general public use.
+   * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    */
   public static void setSequenceId(Cell cell, long seqId) throws IOException {
     if (cell instanceof ExtendedCell) {
@@ -2302,8 +2301,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Sets the given timestamp to the cell. nn * @throws IOException when the passed cell is not of
-   * type {@link ExtendedCell}
+   * Sets the given timestamp to the cell.
+   * @throws IOException when the passed cell is not of type {@link ExtendedCell}
    */
   public static void setTimestamp(Cell cell, long ts) throws IOException {
     if (cell instanceof ExtendedCell) {
@@ -2359,7 +2358,7 @@ public final class PrivateCellUtil {
    * Writes the row from the given cell to the output stream
    * @param out     The outputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param rlength the row length n
+   * @param rlength the row length
    */
   public static void writeRow(OutputStream out, Cell cell, short rlength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2374,7 +2373,7 @@ public final class PrivateCellUtil {
    * Writes the family from the given cell to the output stream
    * @param out     The outputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param flength the family length n
+   * @param flength the family length
    */
   public static void writeFamily(OutputStream out, Cell cell, byte flength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2389,7 +2388,7 @@ public final class PrivateCellUtil {
    * Writes the qualifier from the given cell to the output stream
    * @param out     The outputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param qlength the qualifier length n
+   * @param qlength the qualifier length
    */
   public static void writeQualifier(OutputStream out, Cell cell, int qlength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2405,7 +2404,7 @@ public final class PrivateCellUtil {
    * Writes the qualifier from the given cell to the output stream excluding the common prefix
    * @param out     The dataoutputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param qlength the qualifier length n
+   * @param qlength the qualifier length
    */
   public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, int qlength,
     int commonPrefix) throws IOException {
@@ -2424,7 +2423,7 @@ public final class PrivateCellUtil {
    * Writes the value from the given cell to the output stream
    * @param out     The outputstream to which the data has to be written
    * @param cell    The cell whose contents has to be written
-   * @param vlength the value length n
+   * @param vlength the value length
    */
   public static void writeValue(OutputStream out, Cell cell, int vlength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2439,7 +2438,7 @@ public final class PrivateCellUtil {
    * Writes the tag from the given cell to the output stream
    * @param out        The outputstream to which the data has to be written
    * @param cell       The cell whose contents has to be written
-   * @param tagsLength the tag length n
+   * @param tagsLength the tag length
    */
   public static void writeTags(OutputStream out, Cell cell, int tagsLength) throws IOException {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2472,7 +2471,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the rowkey bytes of the given cell into an int value n * @return rowkey as int
+   * Converts the rowkey bytes of the given cell into an int value
+   * @return rowkey as int
    */
   public static int getRowAsInt(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2483,7 +2483,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the value bytes of the given cell into a long value n * @return value as long
+   * Converts the value bytes of the given cell into a long value
+   * @return value as long
    */
   public static long getValueAsLong(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2494,7 +2495,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the value bytes of the given cell into a int value n * @return value as int
+   * Converts the value bytes of the given cell into an int value
+   * @return value as int
    */
   public static int getValueAsInt(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2505,7 +2507,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the value bytes of the given cell into a double value n * @return value as double
+   * Converts the value bytes of the given cell into a double value
+   * @return value as double
    */
   public static double getValueAsDouble(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2516,7 +2519,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Converts the value bytes of the given cell into a BigDecimal n * @return value as BigDecimal
+   * Converts the value bytes of the given cell into a BigDecimal
+   * @return value as BigDecimal
    */
   public static BigDecimal getValueAsBigDecimal(Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2737,8 +2741,9 @@ public final class PrivateCellUtil {
   /**
    * Estimate based on keyvalue's serialization format in the RPC layer. Note that there is an extra
    * SIZEOF_INT added to the size here that indicates the actual length of the cell for cases where
-   * cell's are serialized in a contiguous format (For eg in RPCs). n * @return Estimate of the
-   * <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the actual cell length.
+   * cells are serialized in a contiguous format (e.g. in RPCs).
+   * @return Estimate of the <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the
+   *         actual cell length.
    */
   public static int estimatedSerializedSizeOf(final Cell cell) {
     return cell.getSerializedSize() + Bytes.SIZEOF_INT;
@@ -2758,9 +2763,9 @@ public final class PrivateCellUtil {
   /**
    * This method exists just to encapsulate how we serialize keys. To be replaced by a factory that
    * we query to figure what the Cell implementation is and then, what serialization engine to use
-   * and further, how to serialize the key for inclusion in hfile index. TODO. n * @return The key
-   * portion of the Cell serialized in the old-school KeyValue way or null if passed a null
-   * <code>cell</code>
+   * and further, how to serialize the key for inclusion in hfile index. TODO.
+   * @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed
+   *         a null <code>cell</code>
    */
   public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) {
     if (cell == null) return null;
@@ -2770,8 +2775,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Create a Cell that is smaller than all other possible Cells for the given Cell's row. n
-   * * @return First possible Cell on passed Cell's row.
+   * Create a Cell that is smaller than all other possible Cells for the given Cell's row.
+   * @return First possible Cell on passed Cell's row.
    */
   public static Cell createFirstOnRow(final Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2835,8 +2840,8 @@ public final class PrivateCellUtil {
 
   /**
    * Create a Cell that is smaller than all other possible Cells for the given Cell's rk:cf and
-   * passed qualifier. nnnn * @return Last possible Cell on passed Cell's rk:cf and passed
-   * qualifier.
+   * passed qualifier.
+   * @return First possible Cell on passed Cell's rk:cf and passed qualifier.
    */
   public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2856,7 +2861,7 @@ public final class PrivateCellUtil {
    * Creates the first cell with the row/family/qualifier of this cell and the given timestamp. Uses
    * the "maximum" type that guarantees that the new cell is the lowest possible for this
    * combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored.
-   * @param cell - cell n
+   * @param cell - cell
    */
   public static Cell createFirstOnRowColTS(Cell cell, long ts) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2874,8 +2879,8 @@ public final class PrivateCellUtil {
   }
 
   /**
-   * Create a Cell that is larger than all other possible Cells for the given Cell's row. n
-   * * @return Last possible Cell on passed Cell's row.
+   * Create a Cell that is larger than all other possible Cells for the given Cell's row.
+   * @return Last possible Cell on passed Cell's row.
    */
   public static Cell createLastOnRow(final Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
@@ -2892,7 +2897,8 @@ public final class PrivateCellUtil {
   /**
    * Create a Cell that is larger than all other possible Cells for the given Cell's rk:cf:q. Used
    * in creating "fake keys" for the multi-column Bloom filter optimization to skip the row/column
-   * we already know is not in the file. n * @return Last possible Cell on passed Cell's rk:cf:q.
+   * we already know is not in the file.
+   * @return Last possible Cell on passed Cell's rk:cf:q.
    */
   public static Cell createLastOnRowCol(final Cell cell) {
     if (cell instanceof ByteBufferExtendedCell) {
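A sketch of the value-conversion helpers documented above; the cell's value bytes are
interpreted in place (PrivateCellUtil is IA.Private, so this is internal-use
illustration only):

    Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"), Bytes.toBytes("q"),
      1L, KeyValue.Type.Put, Bytes.toBytes(42L));
    long asLong = PrivateCellUtil.getValueAsLong(cell); // 42
    int asInt = PrivateCellUtil.getValueAsInt(new KeyValue(Bytes.toBytes("row"),
      Bytes.toBytes("f"), Bytes.toBytes("q"), 1L, KeyValue.Type.Put, Bytes.toBytes(7))); // 7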
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
index be8e4e769ba..9a2a29356b1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
@@ -95,7 +95,7 @@ public abstract class BaseDecoder implements Codec.Decoder {
   /**
    * Extract a Cell.
    * @return a parsed Cell or throws an Exception. EOFException or a generic IOException maybe
-   *         thrown if EOF is reached prematurely. Does not return null. n
+   *         thrown if EOF is reached prematurely. Does not return null.
    */
   @NonNull
   protected abstract Cell parseCell() throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
index e7facdbfbf2..f4552c03826 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
@@ -62,7 +62,7 @@ public class CellCodec implements Codec {
     }
 
     /**
-     * Write int length followed by array bytes. nnnn
+     * Write int length followed by array bytes.
      */
     private void write(final byte[] bytes, final int offset, final int length) throws IOException {
       // TODO add BB backed os check and do for write. Pass Cell
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
index 75e3d48d9fa..07bfb53d5df 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
@@ -65,7 +65,7 @@ public class CellCodecWithTags implements Codec {
     }
 
     /**
-     * Write int length followed by array bytes. nnnn
+     * Write int length followed by array bytes.
      */
     private void write(final byte[] bytes, final int offset, final int length) throws IOException {
       this.out.write(Bytes.toBytes(length));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
index 86a2fefae7a..2b21546a72a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
@@ -82,7 +82,7 @@ public class ByteBufferOutputStream extends OutputStream implements ByteBufferWr
   }
 
   /**
-   * This flips the underlying BB so be sure to use it _last_! n
+   * This flips the underlying BB so be sure to use it _last_!
    */
   public ByteBuffer getByteBuffer() {
     curBuf.flip();
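The "use it last" warning above matters because getByteBuffer() flips the underlying
buffer from write mode to read mode; a sketch:

    ByteBufferOutputStream bbos = new ByteBufferOutputStream(64);
    bbos.write(Bytes.toBytes("payload"));   // may grow the underlying buffer
    ByteBuffer done = bbos.getByteBuffer(); // flipped: position 0, limit = bytes written
    // No further writes after this point; the buffer is now in read mode.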
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferWriterOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferWriterOutputStream.java
index 0dee9e22ade..9c27d90ec26 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferWriterOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferWriterOutputStream.java
@@ -61,8 +61,8 @@ public class ByteBufferWriterOutputStream extends OutputStream implements ByteBu
    * position of the ByteBuffer.
    * @param b   the ByteBuffer
    * @param off the start offset in the data
-   * @param len the number of bytes to write n * if an I/O error occurs. In particular, an
-   *            IOException is thrown if the output stream is closed.
+   * @param len the number of bytes to write
+   * @throws IOException if an I/O error occurs; in particular, if the output stream is closed
    */
   @Override
   public void write(ByteBuffer b, int off, int len) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 1613bd563d0..d1310137e8c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -38,14 +38,14 @@ public interface CellOutputStream {
    * Implementation must copy the entire state of the Cell. If the written Cell is modified
    * immediately after the write method returns, the modifications must have absolutely no effect on
    * the copy of the Cell that was added in the write.
-   * @param cell Cell to write out n
+   * @param cell Cell to write out
    */
   void write(Cell cell) throws IOException;
 
   /**
    * Let the implementation decide what to do. Usually means writing accumulated data into a byte[]
    * that can then be read from the implementation to be sent to disk, put in the block cache, or
-   * sent over the network. n
+   * sent over the network.
    */
   void flush() throws IOException;
 }
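A minimal sketch of an implementation honoring the copy contract above: each written
Cell is deep-copied immediately, so later mutation by the caller cannot affect the
buffered copy (KeyValueUtil.copyToNewKeyValue is assumed as the copying helper):

    class BufferingCellOutputStream implements CellOutputStream {
      private final List<Cell> buffered = new ArrayList<>();

      @Override
      public void write(Cell cell) throws IOException {
        buffered.add(KeyValueUtil.copyToNewKeyValue(cell)); // full copy of the cell state
      }

      @Override
      public void flush() throws IOException {
        // A real implementation would push `buffered` to disk, cache or network here.
      }
    }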
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
index 924cbf21e47..6557bbd02c8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
@@ -169,8 +169,9 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
   }
 
   /**
-   * Compares the bytes in this object to the specified byte array n * @return Positive if left is
-   * bigger than right, 0 if they are equal, and negative if left is smaller than right.
+   * Compares the bytes in this object to the specified byte array
+   * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+   *         smaller than right.
    */
   public int compareTo(final byte[] that) {
     return WritableComparator.compareBytes(this.bytes, this.offset, this.length, that, 0,
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
index af9126d942d..74b0f2db108 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
@@ -57,7 +57,7 @@ public class TagCompressionContext {
    * @param out    Stream to which the compressed tags to be written
    * @param in     Source where tags are available
    * @param offset Offset for the tags bytes
-   * @param length Length of all tag bytes n
+   * @param length Length of all tag bytes
    */
   public void compressTags(OutputStream out, byte[] in, int offset, int length) throws IOException {
     int pos = offset;
@@ -76,7 +76,7 @@ public class TagCompressionContext {
    * @param out    Stream to which the compressed tags to be written
    * @param in     Source buffer where tags are available
    * @param offset Offset for the tags byte buffer
-   * @param length Length of all tag bytes n
+   * @param length Length of all tag bytes
    */
   public void compressTags(OutputStream out, ByteBuffer in, int offset, int length)
     throws IOException {
@@ -101,7 +101,7 @@ public class TagCompressionContext {
    * @param src    Stream where the compressed tags are available
    * @param dest   Destination array where to write the uncompressed tags
    * @param offset Offset in destination where tags to be written
-   * @param length Length of all tag bytes n
+   * @param length Length of all tag bytes
    */
   public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
     throws IOException {
@@ -133,7 +133,7 @@ public class TagCompressionContext {
    * @param dest   Destination array where to write the uncompressed tags
    * @param offset Offset in destination where tags to be written
    * @param length Length of all tag bytes
-   * @return bytes count read from source to uncompress all tags. n
+   * @return bytes count read from source to uncompress all tags.
    */
   public int uncompressTags(ByteBuff src, byte[] dest, int offset, int length) throws IOException {
     int srcBeginPos = src.position();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
index 09647b4ce91..f0152968162 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
@@ -86,7 +86,7 @@ public abstract class Cipher {
    * @param out     the output stream to wrap
    * @param context the encryption context
    * @param iv      initialization vector
-   * @return the encrypting wrapper n
+   * @return the encrypting wrapper
    */
   public abstract OutputStream createEncryptionStream(OutputStream out, Context context, byte[] iv)
     throws IOException;
@@ -95,7 +95,7 @@ public abstract class Cipher {
    * Create an encrypting output stream given an initialized encryptor
    * @param out       the output stream to wrap
    * @param encryptor the encryptor
-   * @return the encrypting wrapper n
+   * @return the encrypting wrapper
    */
   public abstract OutputStream createEncryptionStream(OutputStream out, Encryptor encryptor)
     throws IOException;
@@ -105,7 +105,7 @@ public abstract class Cipher {
    * @param in      the input stream to wrap
    * @param context the encryption context
    * @param iv      initialization vector
-   * @return the decrypting wrapper n
+   * @return the decrypting wrapper
    */
   public abstract InputStream createDecryptionStream(InputStream in, Context context, byte[] iv)
     throws IOException;
@@ -114,7 +114,7 @@ public abstract class Cipher {
    * Create a decrypting output stream given an initialized decryptor
    * @param in        the input stream to wrap
    * @param decryptor the decryptor
-   * @return the decrypting wrapper n
+   * @return the decrypting wrapper
    */
   public abstract InputStream createDecryptionStream(InputStream in, Decryptor decryptor)
     throws IOException;
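Taken together, the four factory methods above wrap plain streams; a sketch of the round trip,
where Encryption.getCipher, the getEncryptor()/getDecryptor() accessors and the key/IV length
accessors are assumptions beyond what this hunk shows:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import java.security.Key;
    import javax.crypto.spec.SecretKeySpec;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.crypto.Cipher;
    import org.apache.hadoop.hbase.io.crypto.Decryptor;
    import org.apache.hadoop.hbase.io.crypto.Encryption;
    import org.apache.hadoop.hbase.io.crypto.Encryptor;

    public class CipherStreamSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Cipher aes = Encryption.getCipher(conf, "AES"); // assumed lookup helper
        Key key = new SecretKeySpec(new byte[aes.getKeyLength()], "AES"); // all-zero demo key
        byte[] iv = new byte[aes.getIvLength()]; // all-zero demo IV

        Encryptor enc = aes.getEncryptor();
        enc.setKey(key);
        enc.setIv(iv);
        ByteArrayOutputStream ciphertext = new ByteArrayOutputStream();
        try (OutputStream os = aes.createEncryptionStream(ciphertext, enc)) {
          os.write("hello".getBytes(StandardCharsets.UTF_8));
        }

        Decryptor dec = aes.getDecryptor();
        dec.setKey(key);
        dec.setIv(iv);
        try (InputStream is =
          aes.createDecryptionStream(new ByteArrayInputStream(ciphertext.toByteArray()), dec)) {
          byte[] plain = new byte[5];
          int read = is.read(plain); // recovers "hello"
        }
      }
    }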
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
index 0d29fe990b9..93822784594 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface Decryptor {
 
   /**
-   * Set the secret key n
+   * Set the secret key
    */
   public void setKey(Key key);
 
@@ -45,12 +45,12 @@ public interface Decryptor {
   public int getBlockSize();
 
   /**
-   * Set the initialization vector n
+   * Set the initialization vector
    */
   public void setIv(byte[] iv);
 
   /**
-   * Create a stream for decryption n
+   * Create a stream for decryption
    */
   public InputStream createDecryptionStream(InputStream in);
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 761fe04d6fc..13e335b82ee 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -318,7 +318,7 @@ public final class Encryption {
    * <p>
    * The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
    * @param out ciphertext
-   * @param src plaintext nnnn
+   * @param src plaintext
    */
   public static void encrypt(OutputStream out, byte[] src, int offset, int length, Encryptor e)
     throws IOException {
@@ -333,7 +333,7 @@ public final class Encryption {
   /**
    * Encrypt a block of plaintext
    * @param out ciphertext
-   * @param src plaintext nnnnn
+   * @param src plaintext
    */
   public static void encrypt(OutputStream out, byte[] src, int offset, int length, Context context,
     byte[] iv) throws IOException {
@@ -349,7 +349,7 @@ public final class Encryption {
    * <p>
    * The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
    * @param out ciphertext
-   * @param in  plaintext nn
+   * @param in  plaintext
    */
   public static void encrypt(OutputStream out, InputStream in, Encryptor e) throws IOException {
     OutputStream cout = e.createEncryptionStream(out);
@@ -363,7 +363,7 @@ public final class Encryption {
   /**
    * Encrypt a stream of plaintext given a context and IV
    * @param out ciphertext
-   * @param in  plaintet nnn
+   * @param in  plaintext
    */
   public static void encrypt(OutputStream out, InputStream in, Context context, byte[] iv)
     throws IOException {
@@ -378,7 +378,6 @@ public final class Encryption {
    * Decrypt a block of ciphertext read in from a stream with the given cipher and context
    * <p>
    * The decryptor's state will be finalized. It should be reinitialized or returned to the pool.
-   * nnnnnn
    */
   public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize, Decryptor d)
     throws IOException {
@@ -391,7 +390,7 @@ public final class Encryption {
   }
 
   /**
-   * Decrypt a block of ciphertext from a stream given a context and IV nnnnnnn
+   * Decrypt a block of ciphertext from a stream given a context and IV
    */
   public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize,
     Context context, byte[] iv) throws IOException {
@@ -402,7 +401,7 @@ public final class Encryption {
   }
 
   /**
-   * Decrypt a stream of ciphertext given a decryptor nnnnn
+   * Decrypt a stream of ciphertext given a decryptor
    */
   public static void decrypt(OutputStream out, InputStream in, int outLen, Decryptor d)
     throws IOException {
@@ -425,7 +424,7 @@ public final class Encryption {
   }
 
   /**
-   * Decrypt a stream of ciphertext given a context and IV nnnnnn
+   * Decrypt a stream of ciphertext given a context and IV
    */
   public static void decrypt(OutputStream out, InputStream in, int outLen, Context context,
     byte[] iv) throws IOException {
@@ -436,7 +435,8 @@ public final class Encryption {
   }
 
   /**
-   * Resolves a key for the given subject nn * @return a key for the given subject
+   * Resolves a key for the given subject
+   * @return a key for the given subject
    * @throws IOException if the key is not found
    */
   public static Key getSecretKeyForSubject(String subject, Configuration conf) throws IOException {
@@ -460,7 +460,7 @@ public final class Encryption {
    * @param in     plaintext
    * @param conf   configuration
    * @param cipher the encryption algorithm
-   * @param iv     the initialization vector, can be null n
+   * @param iv     the initialization vector, can be null
    */
   public static void encryptWithSubjectKey(OutputStream out, InputStream in, String subject,
     Configuration conf, Cipher cipher, byte[] iv) throws IOException {
@@ -482,7 +482,7 @@ public final class Encryption {
    * @param subject the subject's key alias
    * @param conf    configuration
    * @param cipher  the encryption algorithm
-   * @param iv      the initialization vector, can be null n
+   * @param iv      the initialization vector, can be null
    */
   public static void decryptWithSubjectKey(OutputStream out, InputStream in, int outLen,
     String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException {
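The block-oriented helpers in this file pair up the same way; a sketch using the encrypt/decrypt
overloads fixed above, where Encryption.newContext and the Context setters are assumptions not
shown in this hunk:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.nio.charset.StandardCharsets;
    import javax.crypto.spec.SecretKeySpec;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.crypto.Encryption;

    public class EncryptionHelperSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed context wiring; mirrors how HBase builds contexts internally.
        Encryption.Context context = Encryption.newContext(conf);
        context.setCipher(Encryption.getCipher(conf, "AES"));
        context.setKey(new SecretKeySpec(new byte[16], "AES"));
        byte[] iv = new byte[16];

        byte[] plaintext = "block of data".getBytes(StandardCharsets.UTF_8);
        ByteArrayOutputStream ciphertext = new ByteArrayOutputStream();
        Encryption.encrypt(ciphertext, plaintext, 0, plaintext.length, context, iv);

        byte[] roundTripped = new byte[plaintext.length];
        Encryption.decrypt(roundTripped, 0, new ByteArrayInputStream(ciphertext.toByteArray()),
          plaintext.length, context, iv);
      }
    }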
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
index f030de3e174..34f0fa4c0f7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface Encryptor {
 
   /**
-   * Set the secret key n
+   * Set the secret key
    */
   public void setKey(Key key);
 
@@ -50,12 +50,12 @@ public interface Encryptor {
   public byte[] getIv();
 
   /**
-   * Set the initialization vector n
+   * Set the initialization vector
    */
   public void setIv(byte[] iv);
 
   /**
-   * Create a stream for encryption n
+   * Create a stream for encryption
    */
   public OutputStream createEncryptionStream(OutputStream out);
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
index 6c6ec5dd759..0852bc7f13f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
@@ -31,13 +31,13 @@ public interface KeyProvider {
   public static final String PASSWORDFILE = "passwordfile";
 
   /**
-   * Initialize the key provider n
+   * Initialize the key provider
    */
   public void init(String params);
 
   /**
-   * Retrieve the key for a given key aliase n * @return the keys corresponding to the supplied
-   * alias, or null if a key is not found
+   * Retrieve the key for a given key alias
+   * @return the key corresponding to the supplied alias, or null if a key is not found
    */
   public Key getKey(String alias);
 
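A short sketch of resolving a key through this interface; Encryption.getKeyProvider and the
provider configured under hbase.crypto.keyprovider are assumptions based on how
getSecretKeyForSubject resolves keys:

    import java.security.Key;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.crypto.Encryption;
    import org.apache.hadoop.hbase.io.crypto.KeyProvider;

    public class KeyProviderSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumes a concrete provider (e.g. a keystore-backed one) is configured.
        KeyProvider provider = Encryption.getKeyProvider(conf);
        Key key = provider.getKey("mykeyalias"); // hypothetical alias; null if not found
        System.out.println(key == null ? "no key for alias" : key.getAlgorithm());
      }
    }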
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index 7f13b2c6f66..52825b6c683 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -90,9 +90,8 @@ public interface DataBlockEncoder {
   EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx);
 
   /**
-   * Creates a encoder specific encoding context n * store configuration n * encoding strategy used
-   * n * header bytes to be written, put a dummy header here if the header is unknown n * HFile meta
-   * data
+   * Creates an encoder specific encoding context, given the store configuration, the encoding
+   * strategy used, the header bytes to be written (a dummy header if unknown) and the HFile meta
+   * data
    * @return a newly created encoding context
    */
   HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
@@ -100,7 +99,7 @@ public interface DataBlockEncoder {
 
   /**
    * Creates an encoder specific decoding context, which will prepare the data before actual
-   * decoding n * store configuration n * HFile meta data
+   * decoding, given the store configuration and the HFile meta data
    * @return a newly created decoding context
    */
   HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, HFileContext meta);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
index 21f6c92ef35..4eba8fd854e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
@@ -99,7 +99,7 @@ public enum DataBlockEncoding {
   /**
    * Writes id bytes to the given array starting from offset.
    * @param dest   output array
-   * @param offset starting offset of the output array n
+   * @param offset starting offset of the output array
    */
   // System.arraycopy is static native. Nothing we can do this until we have minimum JDK 9.
   @SuppressWarnings("UnsafeFinalization")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index 7750728108d..e9ca3031e3b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -181,7 +181,7 @@ public class EncodedDataBlock {
    * @param inputBuffer Array to be compressed.
    * @param offset      Offset to beginning of the data.
    * @param length      Length to be compressed.
-   * @return Size of compressed data in bytes. n
+   * @return Size of compressed data in bytes.
    */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_EXCEPTION",
       justification = "No sure what findbugs wants but looks to me like no NPE")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
index 6835a8bac3c..63f173c38cc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
@@ -32,9 +32,9 @@ public interface HFileBlockDecodingContext {
   /**
    * Perform all actions that need to be done before the encoder's real decoding process.
    * Decompression needs to be done if {@link HFileContext#getCompression()} returns a valid
-   * compression algorithm. n * numBytes after block and encoding headers n * numBytes without
-   * header required to store the block after decompressing (not decoding) n * ByteBuffer pointed
-   * after the header but before the data n * on disk data to be decoded
+   * compression algorithm. Takes the numBytes after block and encoding headers, the numBytes
+   * without header required to store the block after decompressing (not decoding), a ByteBuffer
+   * pointed after the header but before the data, and the on disk data to be decoded
    */
   void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
     ByteBuff blockBufferWithoutHeader, ByteBuff onDiskBlock) throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
index ed97147ac9b..ad193cad613 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
@@ -91,7 +91,7 @@ public enum IndexBlockEncoding {
   /**
    * Writes id bytes to the given array starting from offset.
    * @param dest   output array
-   * @param offset starting offset of the output array n
+   * @param offset starting offset of the output array
    */
   public void writeIdInBytes(byte[] dest, int offset) throws IOException {
     System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 5a61622101b..a2e63b9fda0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -93,7 +93,8 @@ public class ThrottledInputStream extends InputStream {
 
   /**
    * Read bytes starting from the specified position. This requires rawStream is an instance of
-   * {@link PositionedReadable}. nnnn * @return the number of bytes read
+   * {@link PositionedReadable}.
+   * @return the number of bytes read
    */
   public int read(long position, byte[] buffer, int offset, int length) throws IOException {
     if (!(rawStream instanceof PositionedReadable)) {
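A sketch of the positioned read documented above; the two-argument constructor is an assumption
carried over from the Hadoop original, and the /tmp/example path is hypothetical (positioned reads
require the wrapped stream to be PositionedReadable, as the guard below enforces):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;

    public class ThrottledReadSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataInputStream raw = fs.open(new Path("/tmp/example")); // hypothetical path
        // Assumed constructor: cap throughput at roughly 1 MB/s.
        try (ThrottledInputStream in = new ThrottledInputStream(raw, 1024 * 1024)) {
          byte[] buf = new byte[4096];
          // FSDataInputStream is PositionedReadable, so the positioned read is legal here.
          int n = in.read(128L, buf, 0, buf.length);
          System.out.println("read " + n + " bytes from offset 128");
        }
      }
    }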
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
index 157df98a9b0..b1ab8a9b28d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
@@ -80,7 +80,7 @@ public interface Dictionary {
    * @param data   the data to be written in byte[]
    * @param offset the offset
    * @param length length to be written
-   * @param dict   the dictionary whose contents are to written n
+   * @param dict   the dictionary whose contents are to be written
    */
   public static void write(OutputStream out, byte[] data, int offset, int length, Dictionary dict)
     throws IOException {
@@ -103,7 +103,7 @@ public interface Dictionary {
    * @param data   the data to be written in ByteBuffer
    * @param offset the offset
    * @param length length to be written
-   * @param dict   the dictionary whose contents are to written n
+   * @param dict   the dictionary whose contents are to be written
    */
   public static void write(OutputStream out, ByteBuffer data, int offset, int length,
     Dictionary dict) throws IOException {
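A sketch of the dictionary-backed write fixed above; the concrete LRUDictionary and its
init(capacity) initializer are assumptions about the usual implementation:

    import java.io.ByteArrayOutputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hbase.io.util.Dictionary;
    import org.apache.hadoop.hbase.io.util.LRUDictionary;

    public class DictionaryWriteSketch {
      public static void main(String[] args) throws Exception {
        Dictionary dict = new LRUDictionary();
        dict.init(Byte.MAX_VALUE); // assumed capacity initializer
        byte[] family = "cf".getBytes(StandardCharsets.UTF_8);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // First write misses the dictionary and emits the literal bytes;
        // the repeat write emits only the short dictionary index.
        Dictionary.write(out, family, 0, family.length, dict);
        Dictionary.write(out, family, 0, family.length, dict);
        System.out.println(out.size() + " bytes written");
      }
    }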
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
index 7cfa007478f..97e1e9d3345 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
@@ -118,8 +118,8 @@ public class StreamUtils {
   }
 
   /**
-   * Reads a varInt value stored in an array. n * Input array where the varInt is available n *
-   * Offset in the input array where varInt is available
+   * Reads a varInt value stored in an array, given the input array where the varInt is available
+   * and the offset in the input array where the varInt starts
    * @return A pair of integers in which first value is the actual decoded varInt value and second
    *         value as number of bytes taken by this varInt for it's storage in the input array.
    * @throws IOException When varint is malformed and not able to be read correctly
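Since the javadoc stays terse about the wire format, a self-contained sketch of the protobuf-style
varint layout this helper decodes (method and class names here are illustrative, not the
StreamUtils API):

    public class VarIntSketch {
      /** Decode a varint: 7 payload bits per byte, MSB set means another byte follows. */
      static int[] readVarInt(byte[] input, int offset) {
        int value = 0;
        int shift = 0;
        int pos = offset;
        while (true) {
          byte b = input[pos++];
          value |= (b & 0x7F) << shift;
          if ((b & 0x80) == 0) {
            break;
          }
          shift += 7;
        }
        return new int[] { value, pos - offset }; // decoded value, bytes consumed
      }

      public static void main(String[] args) {
        byte[] encoded = { (byte) 0xAC, 0x02 }; // 300 encoded in two bytes
        int[] r = readVarInt(encoded, 0);
        System.out.println(r[0] + " in " + r[1] + " bytes"); // prints "300 in 2 bytes"
      }
    }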
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
index 27eca9479d6..9e77bfcd04b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
@@ -85,7 +85,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   public abstract int position();
 
   /**
-   * Sets this ByteBuff's position to the given value. n * @return this object
+   * Sets this ByteBuff's position to the given value.
+   * @return this object
    */
   public abstract ByteBuff position(int position);
 
@@ -184,7 +185,7 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   public abstract byte get();
 
   /**
-   * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
+   * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
    * @return the byte at the given index
    */
   public abstract byte get(int index);
@@ -244,7 +245,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
   public abstract ByteBuff put(byte[] src, int offset, int length);
 
   /**
-   * Copies from the given byte[] to this ByteBuff n * @return this ByteBuff
+   * Copies from the given byte[] to this ByteBuff
    * @param src source byte array
    * @return this ByteBuff
    */
@@ -269,14 +271,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
    * Fetches the short value at the given index. Does not change position of the underlying
    * ByteBuffers. The caller is sure that the index will be after the current position of this
    * ByteBuff. So even if the current short does not fit in the current item we can safely move to
-   * the next item and fetch the remaining bytes forming the short n * @return the short value at
-   * the given index
+   * the next item and fetch the remaining bytes forming the short
+   * @return the short value at the given index
    */
   public abstract short getShort(int index);
 
   /**
    * Fetches the short value at the given offset from current position. Does not change position of
-   * the underlying ByteBuffers. n * @return the short value at the given index.
+   * the underlying ByteBuffers.
+   * @return the short value at the given index.
    */
   public abstract short getShortAfterPosition(int offset);
 
@@ -319,13 +322,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
    * Fetches the long at the given index. Does not change position of the underlying ByteBuffers.
    * The caller is sure that the index will be after the current position of this ByteBuff. So even
    * if the current long does not fit in the current item we can safely move to the next item and
-   * fetch the remaining bytes forming the long n * @return the long value at the given index
+   * fetch the remaining bytes forming the long
+   * @return the long value at the given index
    */
   public abstract long getLong(int index);
 
   /**
    * Fetches the long value at the given offset from current position. Does not change position of
-   * the underlying ByteBuffers. n * @return the long value at the given index.
+   * the underlying ByteBuffers.
+   * @return the long value at the given index.
    */
   public abstract long getLongAfterPosition(int offset);
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
index c55ee021bd0..ddd567eb4b9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
@@ -149,8 +149,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the byte at the given index
+   * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
+   * @return the byte at the given index
    */
   @Override
   public byte get(int index) {
@@ -201,8 +201,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Fetches the int at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the int value at the given index
+   * Fetches the int at the given index. Does not change position of the underlying ByteBuffers
+   * @return the int value at the given index
    */
   @Override
   public int getInt(int index) {
@@ -235,8 +235,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Fetches the short at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the short value at the given index
+   * Fetches the short at the given index. Does not change position of the underlying ByteBuffers
+   * @return the short value at the given index
    */
   @Override
   public short getShort(int index) {
@@ -347,8 +347,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Fetches the long at the given index. Does not change position of the underlying ByteBuffers n
-   * * @return the long value at the given index
+   * Fetches the long at the given index. Does not change position of the underlying ByteBuffers
+   * @return the long value at the given index
    */
   @Override
   public long getLong(int index) {
@@ -388,7 +388,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Sets this MBB's position to the given value. n * @return this object
+   * Sets this MBB's position to the given value.
+   * @return this object
    */
   @Override
   public MultiByteBuff position(int position) {
@@ -569,7 +570,7 @@ public class MultiByteBuff extends ByteBuff {
 
   /**
    * Copies the content from this MBB's current position to the byte array and fills it. Also
-   * advances the position of the MBB by the length of the byte[]. n
+   * advances the position of the MBB by the length of the byte[].
    */
   @Override
   public void get(byte[] dst) {
@@ -615,7 +616,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Marks the limit of this MBB. n * @return This MBB
+   * Marks the limit of this MBB.
+   * @return This MBB
    */
   @Override
   public MultiByteBuff limit(int limit) {
@@ -686,8 +688,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Writes a byte to this MBB at the current position and increments the position n * @return this
-   * object
+   * Writes a byte to this MBB at the current position and increments the position
+   * @return this object
    */
   @Override
   public MultiByteBuff put(byte b) {
@@ -960,7 +962,7 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Jumps the current position of this MBB by specified length. n
+   * Jumps the current position of this MBB by the specified length.
    */
   @Override
   public MultiByteBuff skip(int length) {
@@ -982,7 +984,7 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Jumps back the current position of this MBB by specified length. n
+   * Jumps back the current position of this MBB by the specified length.
    */
   @Override
   public MultiByteBuff moveBack(int length) {
@@ -1109,8 +1111,8 @@ public class MultiByteBuff extends ByteBuff {
   }
 
   /**
-   * Copy the content from this MBB to a byte[] based on the given offset and length n * the
-   * position from where the copy should start n * the length upto which the copy has to be done
+   * Copy the content from this MBB to a byte[] based on the given offset (the position from where
+   * the copy should start) and length (the length up to which the copy has to be done)
    * @return byte[] with the copied contents from this MBB.
    */
   @Override
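The cross-buffer semantics these javadocs describe are easiest to see with two small fragments; the
varargs constructor is assumed:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.nio.MultiByteBuff;

    public class MultiByteBuffSketch {
      public static void main(String[] args) {
        ByteBuffer a = ByteBuffer.wrap(new byte[] { 0, 1, 2, 3, 4 });
        ByteBuffer b = ByteBuffer.wrap(new byte[] { 5, 6, 7, 8, 9 });
        MultiByteBuff mbb = new MultiByteBuff(a, b); // assumed varargs constructor

        // Absolute gets leave the position untouched, even when the value
        // straddles the boundary between the two underlying ByteBuffers.
        byte at7 = mbb.get(7);            // 7
        long straddling = mbb.getLong(2); // bytes 2..9, read across both fragments

        mbb.position(4);
        mbb.skip(3);     // position is now 7
        mbb.moveBack(2); // position is now 5
        System.out.println(at7 + " " + straddling + " " + mbb.position());
      }
    }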
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
index 9ef9e2ddc17..e2cac4b6b56 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
@@ -179,7 +179,7 @@ public abstract class User {
 
   /**
    * Wraps an underlying {@code UserGroupInformation} instance.
-   * @param ugi The base Hadoop user n
+   * @param ugi The base Hadoop user
    */
   public static User create(UserGroupInformation ugi) {
     if (ugi == null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
index fcf6cc64896..436b5bbc69a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
@@ -181,7 +181,7 @@ public class UserProvider extends BaseConfigurable {
 
   /**
    * Wraps an underlying {@code UserGroupInformation} instance.
-   * @param ugi The base Hadoop user n
+   * @param ugi The base Hadoop user
    */
   public User create(UserGroupInformation ugi) {
     if (ugi == null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index 179074ef00c..88ee9c9666a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -69,7 +69,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange
 
   /**
    * Update the beginning of this range. {@code offset + length} may not be greater than
-   * {@code bytes.length}. Resets {@code position} to 0. n * the new start of this range.
+   * {@code bytes.length}. Resets {@code position} to 0, given the new start of this range.
    * @return this.
    */
   @Override
@@ -82,7 +82,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange
   /**
    * Update the length of this range. {@code offset + length} should not be greater than
    * {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
-   * {@code position} to {@code length}. n * The new length of this range.
+   * {@code position} to {@code length}, given the new length of this range.
    * @return this.
    */
   @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
index 276203f5663..d57671ca503 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
@@ -890,7 +890,7 @@ public final class ByteBufferUtils {
   }
 
   /**
-   * n * ByteBuffer to hash n * offset to start from n * length to hash
+   * Computes a hash code over the given ByteBuffer, from the given offset, for the given length
    */
   public static int hashCode(ByteBuffer buf, int offset, int length) {
     int hash = 1;
@@ -1005,7 +1005,7 @@ public final class ByteBufferUtils {
    * @param buf    The ByteBuffer
    * @param offset Offset to int value
    * @param length Number of bytes used to store the int value.
-   * @return the int value n * if there's not enough bytes left in the buffer after the given offset
+   * @return the int value
+   * @throws IllegalArgumentException if there's not enough bytes left in the buffer after the
+   *         given offset
    */
   public static int readAsInt(ByteBuffer buf, int offset, final int length) {
     if (offset + length > buf.limit()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
index 64bd5cb3b6c..4addf9057e2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
@@ -63,14 +63,13 @@ public interface ByteRange extends Comparable<ByteRange> {
 
   /**
    * Nullifies this ByteRange. That is, it becomes a husk, being a range over no byte[] whatsoever.
-   * n
    */
   public ByteRange unset();
 
   /**
    * Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
    * is set to {@code capacity}.
-   * @param capacity the size of a new byte[]. n
+   * @param capacity the size of a new byte[].
    */
   public ByteRange set(int capacity);
 
@@ -78,7 +77,7 @@ public interface ByteRange extends Comparable<ByteRange> {
    * Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
    * is set to {@code bytes.length}. A null {@code bytes} IS supported, in which case this method
    * will behave equivalently to {@link #unset()}.
-   * @param bytes the array to wrap. n
+   * @param bytes the array to wrap.
    */
   public ByteRange set(byte[] bytes);
 
@@ -188,21 +187,21 @@ public interface ByteRange extends Comparable<ByteRange> {
   /**
    * Store the short value at {@code index}
    * @param index the index in the range where {@code val} is stored
-   * @param val   the value to store n
+   * @param val   the value to store
    */
   public ByteRange putShort(int index, short val);
 
   /**
    * Store the int value at {@code index}
    * @param index the index in the range where {@code val} is stored
-   * @param val   the value to store n
+   * @param val   the value to store
    */
   public ByteRange putInt(int index, int val);
 
   /**
    * Store the long value at {@code index}
    * @param index the index in the range where {@code val} is stored
-   * @param val   the value to store n
+   * @param val   the value to store
    */
   public ByteRange putLong(int index, long val);
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index f7d35370e80..afa222ddc41 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -267,8 +267,9 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * Compares the bytes in this object to the specified byte array n * @return Positive if left is
-   * bigger than right, 0 if they are equal, and negative if left is smaller than right.
+   * Compares the bytes in this object to the specified byte array
+   * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+   *         smaller than right.
    */
   public int compareTo(final byte[] that) {
     return BYTES_RAWCOMPARATOR.compare(this.bytes, this.offset, this.length, that, 0, that.length);
@@ -570,7 +571,8 @@ public class Bytes implements Comparable<Bytes> {
 
   /**
    * Write a printable representation of a byte array.
-   * @param b byte array n * @see #toStringBinary(byte[], int, int)
+   * @param b byte array
+   * @see #toStringBinary(byte[], int, int)
    */
   public static String toStringBinary(final byte[] b) {
     if (b == null) return "null";
@@ -2244,7 +2246,7 @@ public class Bytes implements Comparable<Bytes> {
    * Copy the byte array given in parameter and return an instance of a new byte array with the same
    * length and the same content.
    * @param bytes the byte array to copy from
-   * @return a copy of the given designated byte array nn
+   * @return a copy of the given designated byte array
    */
   public static byte[] copy(byte[] bytes, final int offset, final int length) {
     if (bytes == null) return null;
@@ -2425,7 +2427,7 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * Fill given array with zeros at the specified position. nnn
+   * Fill given array with zeros at the specified position.
    */
   public static void zero(byte[] b, int offset, int length) {
     checkPositionIndex(offset, b.length, "offset");
@@ -2508,7 +2510,8 @@ public class Bytes implements Comparable<Bytes> {
   }
 
   /**
-   * Create a byte array which is multiple given bytes nn * @return byte array
+   * Create a byte array which is the given byte array repeated multiple times
+   * @return byte array
    */
   public static byte[] multiple(byte[] srcBytes, int multiNum) {
     if (multiNum <= 0) {
@@ -2563,7 +2566,7 @@ public class Bytes implements Comparable<Bytes> {
 
   /**
    * Create a byte array from a string of hash digits. The length of the string must be a multiple
-   * of 2 n
+   * of 2
    */
   public static byte[] fromHex(String hex) {
     checkArgument(hex.length() % 2 == 0, "length must be a multiple of 2");
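All of the Bytes helpers touched above are static; a quick tour using the signatures as fixed here:

    import org.apache.hadoop.hbase.util.Bytes;

    public class BytesSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.fromHex("0a0b0c0d");       // 4 bytes from an even-length hex string
        String printable = Bytes.toStringBinary(row); // "\x0A\x0B\x0C\x0D"

        byte[] slice = Bytes.copy(row, 1, 2); // independent copy of bytes 1..2
        Bytes.zero(row, 0, 2);                // zero the first two bytes in place
        byte[] doubled = Bytes.multiple(slice, 2); // slice repeated twice

        System.out.println(printable + " -> " + Bytes.toStringBinary(doubled));
      }
    }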
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
index d943803fb2f..dc810834a66 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
@@ -85,8 +85,8 @@ public enum ChecksumType {
   }
 
   /**
-   * Cannot rely on enum ordinals . They change if item is removed or moved. Do our own codes. n
-   * * @return Type associated with passed code.
+   * Cannot rely on enum ordinals. They change if an item is removed or moved. Do our own codes.
+   * @return Type associated with passed code.
    */
   public static ChecksumType codeToType(final byte b) {
     for (ChecksumType t : ChecksumType.values()) {
@@ -98,8 +98,8 @@ public enum ChecksumType {
   }
 
   /**
-   * Map a checksum name to a specific type. Do our own names. n * @return Type associated with
-   * passed code.
+   * Map a checksum name to a specific type. Do our own names.
+   * @return Type associated with passed name.
    */
   public static ChecksumType nameToType(final String name) {
     for (ChecksumType t : ChecksumType.values()) {
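A sketch of the two lookups just fixed; the getCode() accessor is an assumption not shown in this
hunk:

    import org.apache.hadoop.hbase.util.ChecksumType;

    public class ChecksumTypeSketch {
      public static void main(String[] args) {
        // Stable codes survive enum reordering; names resolve independently of ordinals.
        ChecksumType byName = ChecksumType.nameToType("CRC32C");
        ChecksumType byCode = ChecksumType.codeToType(byName.getCode());
        System.out.println(byName == byCode); // true
      }
    }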
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
index 84e70873727..1b3eef180a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
@@ -27,10 +27,9 @@ public class Classes {
 
   /**
    * Equivalent of {@link Class#forName(String)} which also returns classes for primitives like
-   * <code>boolean</code>, etc. n * The name of the class to retrieve. Can be either a normal class
-   * or a primitive class.
-   * @return The class specified by <code>className</code> n * If the requested class can not be
-   *         found.
+   * <code>boolean</code>, etc. Takes the name of the class to retrieve, which can be either a
+   * normal class or a primitive class.
+   * @return The class specified by <code>className</code>
+   * @throws ClassNotFoundException If the requested class can not be found.
    */
   public static Class<?> extendedForName(String className) throws ClassNotFoundException {
     Class<?> valueType;
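For contrast with plain Class.forName, a sketch of the extended lookup documented above:

    import org.apache.hadoop.hbase.util.Classes;

    public class ClassesSketch {
      public static void main(String[] args) throws ClassNotFoundException {
        // Class.forName("boolean") would throw; the extended lookup maps primitive names.
        Class<?> primitive = Classes.extendedForName("boolean");
        Class<?> normal = Classes.extendedForName("java.lang.String");
        System.out.println(primitive + " / " + normal);
      }
    }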
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index 93a70e698bc..5d9c5fa9681 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -332,7 +332,7 @@ public final class CommonFSUtils {
    * Returns the URI in the string format
    * @param c configuration
    * @param p path
-   * @return - the URI's to string format n
+   * @return the URI in string format
    */
   public static String getDirUri(final Configuration c, Path p) throws IOException {
     if (p.toUri().getScheme() != null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
index 531d12085fe..a5e6a65efc9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
@@ -215,7 +215,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
    * @param parent     the parent class loader for exempted classes
    * @param pathPrefix a prefix used in temp path name to store the jar file locally
    * @param conf       the configuration used to create the class loader, if needed
-   * @return a CoprocessorClassLoader for the coprocessor jar path n
+   * @return a CoprocessorClassLoader for the coprocessor jar path
    */
   public static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent,
     final String pathPrefix, final Configuration conf) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
index 276e436ed13..0cd1b41c502 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
@@ -58,7 +58,7 @@ public class KeyLocker<K> {
     }, NB_CONCURRENT_LOCKS);
 
   /**
-   * Return a lock for the given key. The lock is already locked. n
+   * Return a lock for the given key. The lock is already locked.
    */
   public ReentrantLock acquireLock(K key) {
     if (key == null) throw new IllegalArgumentException("key must not be null");
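Because acquireLock returns the lock already held, the idiomatic caller pattern is a try/finally,
sketched here:

    import java.util.concurrent.locks.ReentrantLock;
    import org.apache.hadoop.hbase.util.KeyLocker;

    public class KeyLockerSketch {
      public static void main(String[] args) {
        KeyLocker<String> locker = new KeyLocker<>();
        ReentrantLock lock = locker.acquireLock("region-A"); // returned already locked
        try {
          // critical section guarded per key "region-A"
        } finally {
          lock.unlock();
        }
      }
    }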
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
index d967f5d53a7..7e143e15de2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
@@ -29,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 public class MD5Hash {
 
   /**
-   * Given a byte array, returns in MD5 hash as a hex string. n * @return SHA1 hash as a 32
-   * character hex string.
+   * Given a byte array, returns its MD5 hash as a hex string.
+   * @return MD5 hash as a 32 character hex string.
    */
   public static String getMD5AsHex(byte[] key) {
     return getMD5AsHex(key, 0, key.length);
@@ -39,8 +39,8 @@ public class MD5Hash {
   /**
    * Given a byte array, returns its MD5 hash as a hex string. Only "length" number of bytes
    * starting at "offset" within the byte array are used.
-   * @param key the key to hash (variable length byte array) nn * @return MD5 hash as a 32 character
-   *            hex string.
+   * @param key the key to hash (variable length byte array)
+   * @return MD5 hash as a 32 character hex string.
    */
   public static String getMD5AsHex(byte[] key, int offset, int length) {
     try {
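Both overloads in one short sketch:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hbase.util.MD5Hash;

    public class MD5HashSketch {
      public static void main(String[] args) {
        byte[] name = "table,startkey,1234567890".getBytes(StandardCharsets.UTF_8);
        String full = MD5Hash.getMD5AsHex(name);          // 32 hex chars over the whole array
        String partial = MD5Hash.getMD5AsHex(name, 0, 5); // hash of the first 5 bytes only
        System.out.println(full + " " + partial);
      }
    }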
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
index dd8eb4f1858..fe8d111dfbe 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
@@ -74,14 +74,14 @@ public class Pair<T1, T2> implements Serializable {
   }
 
   /**
-   * Return the first element stored in the pair. n
+   * Return the first element stored in the pair.
    */
   public T1 getFirst() {
     return first;
   }
 
   /**
-   * Return the second element stored in the pair. n
+   * Return the second element stored in the pair.
    */
   public T2 getSecond() {
     return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
index 44bc2b81dc0..ef44fc4e043 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
@@ -42,14 +42,14 @@ public class PairOfSameType<T> implements Iterable<T> {
   }
 
   /**
-   * Return the first element stored in the pair. n
+   * Return the first element stored in the pair.
    */
   public T getFirst() {
     return first;
   }
 
   /**
-   * Return the second element stored in the pair. n
+   * Return the second element stored in the pair.
    */
   public T getSecond() {
     return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
index efa52612be6..cb61cfbe246 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
@@ -156,12 +156,12 @@ public interface PositionedByteRange extends ByteRange {
   public PositionedByteRange put(byte[] val, int offset, int length);
 
   /**
-   * Limits the byte range upto a specified value. Limit cannot be greater than capacity nn
+   * Limits the byte range up to a specified value. Limit cannot be greater than capacity
    */
   public PositionedByteRange setLimit(int limit);
 
   /**
-   * Return the current limit n
+   * Return the current limit
    */
   public int getLimit();
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index c3d4d82f6bd..f73064f70a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -77,8 +77,8 @@ public final class PrettyPrinter {
 
   /**
    * Convert a human readable string to its value.
-   * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit) nn * @return the value
-   *      corresponding to the human readable string
+   * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
+   * @return the value corresponding to the human readable string
    */
   public static String valueOf(final String pretty, final Unit unit) throws HBaseException {
     StringBuilder value = new StringBuilder();
@@ -155,7 +155,8 @@ public final class PrettyPrinter {
    * Convert a human readable time interval to seconds. Examples of the human readable time
    * intervals are: 50 DAYS 1 HOUR 30 MINUTES , 25000 SECONDS etc. The units of time specified can
    * be in uppercase as well as lowercase. Also, if a single number is specified without any time
-   * unit, it is assumed to be in seconds. n * @return value in seconds
+   * unit, it is assumed to be in seconds.
+   * @return value in seconds
    */
   private static long humanReadableIntervalToSec(final String humanReadableInterval)
     throws HBaseException {
@@ -261,7 +262,7 @@ public final class PrettyPrinter {
    * KB , 25000 B etc. The units of size specified can be in uppercase as well as lowercase. Also,
    * if a single number is specified without any time unit, it is assumed to be in bytes.
    * @param humanReadableSize human readable size
-   * @return value in bytes n
+   * @return value in bytes
    */
   private static long humanReadableSizeToBytes(final String humanReadableSize)
     throws HBaseException {
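A sketch pairing valueOf with the format method its @see points at; the Unit.TIME_INTERVAL constant
is an assumption not shown in this hunk:

    import org.apache.hadoop.hbase.exceptions.HBaseException;
    import org.apache.hadoop.hbase.util.PrettyPrinter;
    import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;

    public class PrettyPrinterSketch {
      public static void main(String[] args) throws HBaseException {
        // Human readable -> raw value (seconds), and back again.
        String raw = PrettyPrinter.valueOf("1 HOUR 30 MINUTES", Unit.TIME_INTERVAL); // "5400"
        String pretty = PrettyPrinter.format("5400", Unit.TIME_INTERVAL);
        System.out.println(raw + " / " + pretty);
      }
    }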
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
index 24b9f2d997b..868c731e0a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
@@ -66,22 +66,22 @@ public class SimpleMutableByteRange extends AbstractByteRange {
 
   /**
    * Create a new {@code ByteRange} over a new backing array of size {@code capacity}. The range's
-   * offset and length are 0 and {@code capacity}, respectively. n * the size of the backing array.
+   * offset and length are 0 and {@code capacity}, respectively.
    */
   public SimpleMutableByteRange(int capacity) {
     this(new byte[capacity]);
   }
 
   /**
-   * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap.
+   * Create a new {@code ByteRange} wrapping the provided {@code bytes} array.
    */
   public SimpleMutableByteRange(byte[] bytes) {
     set(bytes);
   }
 
   /**
-   * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap. n * The
-   * offset into {@code bytes} considered the beginning of this range. n * The length of this range.
+   * Create a new {@code ByteRange} over the provided {@code bytes} array, with the given offset
+   * into {@code bytes} as the beginning of this range, and the given length for this range.
    */
   public SimpleMutableByteRange(byte[] bytes, int offset, int length) {
     set(bytes, offset, length);
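Tying the constructors above to the ByteRange put methods fixed earlier in this commit; getBytes()
is an assumption from the base class:

    import org.apache.hadoop.hbase.util.ByteRange;
    import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

    public class ByteRangeSketch {
      public static void main(String[] args) {
        // Range over a fresh 16-byte backing array (offset 0, length 16).
        ByteRange r = new SimpleMutableByteRange(16);
        r.putInt(0, 42);          // int at index 0
        r.putLong(4, 123456789L); // long at index 4
        // A narrower view over the same backing array, covering just the long.
        ByteRange window = new SimpleMutableByteRange(r.getBytes(), 4, 8);
        System.out.println(window.getLength());
      }
    }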
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
index d91fd712f37..68e99c3053b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
@@ -70,7 +70,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
 
   /**
    * Create a new {@code PositionedByteRange} over a new backing array of size {@code capacity}. The
-   * range's offset and length are 0 and {@code capacity}, respectively. n * the size of the backing
+   * range's offset and length are 0 and {@code capacity} respectively, the size of the new backing
    * array.
    */
   public SimplePositionedMutableByteRange(int capacity) {
@@ -78,17 +78,15 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
   }
 
   /**
-   * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
-   * wrap.
+   * Create a new {@code PositionedByteRange} wrapping the provided {@code bytes} array.
    */
   public SimplePositionedMutableByteRange(byte[] bytes) {
     set(bytes);
   }
 
   /**
-   * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
-   * wrap. n * The offset into {@code bytes} considered the beginning of this range. n * The length
-   * of this range.
+   * Create a new {@code PositionedByteRange} over the provided {@code bytes} array, with the given
+   * offset into {@code bytes} as the beginning of this range, and the given length for this range.
    */
   public SimplePositionedMutableByteRange(byte[] bytes, int offset, int length) {
     set(bytes, offset, length);
@@ -130,7 +128,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
 
   /**
    * Update the beginning of this range. {@code offset + length} may not be greater than
+   * {@code bytes.length}. Resets {@code position} to 0, given the new start of this range.
+   * {@code bytes.length}. Resets {@code position} to 0. the new start of this range.
    * @return this.
    */
   @Override
@@ -143,7 +141,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
   /**
    * Update the length of this range. {@code offset + length} should not be greater than
    * {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
-   * {@code position} to {@code length}. n * The new length of this range.
+   * {@code position} to {@code length}, given the new length of this range.
    * @return this.
    */
   @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
index 0caecf649ce..e23c62045fa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface TimeMeasurable<T> {
 
   /**
-   * Measure elapsed time. n
+   * Measure elapsed time.
    */
   T measure();
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
index 48b60a49616..3aa8a6ec123 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
@@ -193,7 +193,7 @@ public final class UnsafeAccess {
 
   /**
    * Reads an int value at the given Object's offset considering it was written in big-endian format.
-   * nn * @return int value at offset
+   * @return int value at offset
    */
   public static int toInt(Object ref, long offset) {
     if (LITTLE_ENDIAN) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
index 154bc0e42db..2c600e3c5fd 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
@@ -73,7 +73,7 @@ public class WindowMovingAverage<T> extends MovingAverage<T> {
 
   /**
    * Get statistics at index.
-   * @param index index of bar n
+   * @param index index of bar
    */
   protected long getStatisticsAtIndex(int index) {
     if (index < 0 || index >= getNumberOfStatistics()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
index 29977997389..d1f22dca0f9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
@@ -105,8 +105,8 @@ public final class ZKConfig {
   }
 
   /**
-   * Return the ZK Quorum servers string given the specified configuration n * @return Quorum
-   * servers String
+   * Return the ZK Quorum servers string given the specified configuration
+   * @return Quorum servers String
    */
   private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) {
     String defaultClientPort = Integer.toString(
@@ -168,8 +168,8 @@ public final class ZKConfig {
 
   /**
    * Separate the given key into the three configurations it should contain: hbase.zookeeper.quorum,
-   * hbase.zookeeper.client.port and zookeeper.znode.parent n * @return the three configuration in
-   * the described order n
+   * hbase.zookeeper.client.port and zookeeper.znode.parent
+   * @return the three configurations in the described order
    */
   public static ZKClusterKey transformClusterKey(String key) throws IOException {
     List<String> parts = Splitter.on(':').splitToList(key);
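A sketch of the three-part cluster key split; the ZKClusterKey getter names are assumptions not
shown in this hunk:

    import java.io.IOException;
    import org.apache.hadoop.hbase.zookeeper.ZKConfig;
    import org.apache.hadoop.hbase.zookeeper.ZKConfig.ZKClusterKey;

    public class ClusterKeySketch {
      public static void main(String[] args) throws IOException {
        // quorum : client port : znode parent
        ZKClusterKey key = ZKConfig.transformClusterKey("zk1,zk2,zk3:2181:/hbase");
        System.out.println(key.getQuorumString() + " " + key.getClientPort() + " "
          + key.getZnodeParent());
      }
    }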
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index f76148e9dd1..af944ad2a0f 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -258,7 +258,7 @@ public class TestHBaseConfiguration {
     }
 
     /**
-     * Wrapper to fetch the configured {@code List<CredentialProvider>}s. n * Configuration with
+     * Wrapper to fetch the configured {@code List<CredentialProvider>}s, given a Configuration with
      * GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS defined
      * @return List of CredentialProviders, or null if they could not be loaded
      */
@@ -283,8 +283,8 @@ public class TestHBaseConfiguration {
 
     /**
      * Create a CredentialEntry using the configured Providers. If multiple CredentialProviders are
-     * configured, the first will be used. n * Configuration for the CredentialProvider n *
-     * CredentialEntry name (alias) n * The credential
+     * configured, the first will be used. Takes the Configuration for the CredentialProvider, the
+     * CredentialEntry name (alias) and the credential itself.
      */
     public void createEntry(Configuration conf, String name, char[] credential) throws Exception {
       if (!isHadoopCredentialProviderAvailable()) {
@@ -303,8 +303,8 @@ public class TestHBaseConfiguration {
 
     /**
      * Create a CredentialEntry with the give name and credential in the credentialProvider. The
-     * credentialProvider argument must be an instance of Hadoop CredentialProvider. n * Instance of
-     * CredentialProvider n * CredentialEntry name (alias) n * The credential to store
+     * credentialProvider argument must be an instance of Hadoop CredentialProvider. Takes the
+     * CredentialProvider instance, the CredentialEntry name (alias) and the credential to store.
      */
     private void createEntryInProvider(Object credentialProvider, String name, char[] credential)
       throws Exception {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
index 583e7efcfa9..0185ebff0ec 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
@@ -409,7 +409,7 @@ public final class X509TestContext {
    * circumstances to inject a "bad" certificate where the keystore doesn't match the CA in the
    * truststore. Or use it to create a connection without a truststore.
    * @see #setConfigurations(KeyStoreFileType, KeyStoreFileType) which sets both keystore and
-   *      truststore and is more applicable to general use. nnn
+   *      truststore and is more applicable to general use.
    */
   public void setKeystoreConfigurations(KeyStoreFileType keyStoreFileType, Configuration confToSet)
     throws IOException {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
index 56d3c8cb859..78d70f8f581 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
@@ -371,7 +371,7 @@ final class X509TestHelpers {
    * @param cert        the certificate to serialize.
    * @param keyPassword an optional password to encrypt the trust store. If empty or null, the cert
    *                    will not be encrypted.
-   * @return the serialized bytes of the BCFKS trust store. nn
+   * @return the serialized bytes of the BCFKS trust store.
    */
   public static byte[] certToBCFKSTrustStoreBytes(X509Certificate cert, char[] keyPassword)
     throws IOException, GeneralSecurityException {
@@ -434,7 +434,7 @@ final class X509TestHelpers {
    * @param privateKey  the private key to serialize.
    * @param keyPassword an optional key password. If empty or null, the private key will not be
    *                    encrypted.
-   * @return the serialized bytes of the BCFKS key store. nn
+   * @return the serialized bytes of the BCFKS key store.
    */
   public static byte[] certAndPrivateKeyToBCFKSBytes(X509Certificate cert, PrivateKey privateKey,
     char[] keyPassword) throws IOException, GeneralSecurityException {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
index b07a924a4e3..6635accedbb 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
@@ -51,7 +51,7 @@ public class RandomDistribution {
 
     /**
      * Generate random integers from min (inclusive) to max (exclusive) following even distribution.
-     * n * The basic random number generator. n * Minimum integer n * maximum integer (exclusive).
+     * Takes the basic random number generator, the minimum integer and the maximum integer
+     * (exclusive).
      */
     public Flat(Random random, int min, int max) {
       if (min >= max) {
@@ -82,17 +82,16 @@ public class RandomDistribution {
     private final ArrayList<Double> v;
 
     /**
-     * Constructor n * The random number generator. n * minimum integer (inclusvie) n * maximum
-     * integer (exclusive) n * parameter sigma. (sigma > 1.0)
+     * Constructor. Takes the random number generator, the minimum integer (inclusive), the maximum
+     * integer (exclusive) and the parameter sigma (sigma > 1.0).
      */
     public Zipf(Random r, int min, int max, double sigma) {
       this(r, min, max, sigma, DEFAULT_EPSILON);
     }
 
     /**
-     * Constructor. n * The random number generator. n * minimum integer (inclusvie) n * maximum
-     * integer (exclusive) n * parameter sigma. (sigma > 1.0) n * Allowable error percentage (0 <
-     * epsilon < 1.0).
+     * Constructor. Takes the random number generator, the minimum integer (inclusive), the
+     * maximum integer (exclusive), the parameter sigma (sigma > 1.0) and the allowable error
+     * percentage (0 < epsilon < 1.0).
      */
     public Zipf(Random r, int min, int max, double sigma, double epsilon) {
       if ((max <= min) || (sigma <= 1) || (epsilon <= 0) || (epsilon >= 0.5)) {
@@ -178,8 +177,8 @@ public class RandomDistribution {
 
     /**
      * Generate random integers from min (inclusive) to max (exclusive) following Binomial
-     * distribution. n * The basic random number generator. n * Minimum integer n * maximum integer
-     * (exclusive). n * parameter.
+     * distribution. Takes the basic random number generator, the minimum integer (inclusive),
+     * the maximum integer (exclusive) and the parameter p.
      */
     public Binomial(Random random, int min, int max, double p) {
       if (min >= max) {
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index d89f512858e..fb08dcc190d 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -54,7 +54,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   void updatePut(long t);
 
   /**
-   * Update the PutBatch time histogram if a batch contains a Put op n
+   * Update the PutBatch time histogram if a batch contains a Put op
    */
   void updatePutBatch(long t);
 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
index 1de3a34bfeb..05f967731a5 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
@@ -80,7 +80,7 @@ public interface MetricsRESTSource extends BaseSource, JvmPauseMonitorSource {
   void incrementSucessfulPutRequests(int inc);
 
   /**
-   * Increment the number of successful Delete requests. n
+   * Increment the number of successful Delete requests.
    */
   void incrementSucessfulDeleteRequests(int inc);
 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
index 214626204e7..3d938bdd539 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
@@ -36,7 +36,7 @@ public interface MetricsThriftServerSource extends ExceptionTrackingSource, JvmP
   String ACTIVE_WORKER_COUNT_KEY = "numActiveWorkers";
 
   /**
-   * Add how long an operation was in the queue. n
+   * Add how long an operation was in the queue.
    */
   void incTimeInQueue(long time);
 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
index 66d5e3d87a4..0581d431ebf 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
@@ -41,7 +41,7 @@ public interface MetricHistogram {
   String NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME = "_99.9th_percentile";
 
   /**
-   * Add a single value to a histogram's stream of values. n
+   * Add a single value to a histogram's stream of values.
    */
   void add(long value);
 
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
index f2d3f63dbcb..fd3cfcc8b87 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
@@ -68,7 +68,7 @@ public class MetricSampleQuantiles {
   /**
    * Specifies the allowable error for this rank, depending on which quantiles are being targeted.
    * This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this
-   * rank can be. n * the index in the list of samples
+   * rank can be. The rank is the index in the list of samples.
    */
   private double allowableError(int rank) {
     int size = samples.size();
@@ -208,7 +208,7 @@ public class MetricSampleQuantiles {
 
   /**
    * Get a snapshot of the current values of all the tracked quantiles.
-   * @return snapshot of the tracked quantiles n * if no items have been added to the estimator
+   * @return snapshot of the tracked quantiles
+   * @throws IOException if no items have been added to the estimator
    */
   synchronized public Map<MetricQuantile, Long> snapshot() throws IOException {
     // flush the buffer first for best results
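For illustration, a minimal sketch of how the estimator above is typically exercised; the MetricQuantile constructor arguments and the insert() call are assumptions about the surrounding API, not part of this patch:

    MetricSampleQuantiles estimator = new MetricSampleQuantiles(new MetricQuantile[] {
      new MetricQuantile(0.50, 0.050), new MetricQuantile(0.99, 0.001) });
    for (long v = 1; v <= 1000; v++) {
      estimator.insert(v); // values are buffered and merged into the sample
    }
    // flushes the buffer, then reports one estimate per tracked quantile;
    // throws IOException if nothing has been inserted yet
    Map<MetricQuantile, Long> snap = estimator.snapshot();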
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 50cefc4c39a..ce1b387bc15 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -252,10 +252,10 @@ public class HttpServer implements FilterContainer {
     private int port = -1;
 
     /**
-     * Add an endpoint that the HTTP server should listen to. n * the endpoint of that the HTTP
-     * server should listen to. The scheme specifies the protocol (i.e. HTTP / HTTPS), the host
-     * specifies the binding address, and the port specifies the listening port. Unspecified or zero
-     * port means that the server can listen to any port.
+     * Add an endpoint that the HTTP server should listen to. The scheme of the endpoint specifies
+     * the protocol (i.e. HTTP / HTTPS), the host specifies the binding address, and the port
+     * specifies the listening port. Unspecified or zero port means that the server can listen to
+     * any port.
      */
     public Builder addEndpoint(URI endpoint) {
       endpoints.add(endpoint);
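As a usage sketch of the endpoint semantics above (scheme picks the protocol, host the bind address, port 0 any free port); setName() and build() are assumed from the Builder's usual shape rather than shown in this hunk:

    HttpServer infoServer = new HttpServer.Builder()
      .setName("example")
      .addEndpoint(URI.create("http://0.0.0.0:0")) // port 0: any free port
      .build();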
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
index c8456a461bb..494a30c3e77 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -141,7 +141,7 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter {
 
   /**
    * The purpose of this function is to get the doAs parameter of a http request case insensitively
-   * n * @return doAs parameter if exists or null otherwise
+   * @return the doAs parameter if it exists, or null otherwise
    */
   public static String getDoasFromHeader(final HttpServletRequest request) {
     String doas = null;
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index d393187b1e4..978de8530ef 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -146,8 +146,8 @@ public class JMXJsonServlet extends HttpServlet {
   }
 
   /**
-   * Process a GET request for the specified resource. n * The servlet request we are processing n *
-   * The servlet response we are creating
+   * Process a GET request for the specified resource, given the servlet request we are processing
+   * and the servlet response we are creating.
    */
   @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
index 64119ec5095..cc6a99bd300 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
@@ -112,7 +112,7 @@ public final class JSONMetricUtil {
    * Method for building map used for constructing ObjectName. Mapping is done with arrays indices
    * @param keys   Map keys
    * @param values Map values
-   * @return Map or null if arrays are empty * or have different number of elements
+   * @return Map or null if arrays are empty or have different number of elements
    */
   @SuppressWarnings("JdkObsolete") // javax requires hashtable param for ObjectName constructor
   public static Hashtable<String, String> buldKeyValueTable(String[] keys, String[] values) {
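A small illustrative call for the mapping above; same-length, non-empty arrays become ObjectName properties, anything else yields null:

    String[] keys = { "type", "name" };
    String[] values = { "Memory", "HeapMemoryUsage" };
    // returns a populated Hashtable here; null for empty or mismatched arrays
    Hashtable<String, String> props = JSONMetricUtil.buldKeyValueTable(keys, values);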
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index b248f4dc17a..e57cf007325 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -77,7 +77,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
   }
 
   /**
-   * Returns a ClusterStatus for this HBase cluster n
+   * Returns a ClusterStatus for this HBase cluster
    */
   @Override
   public ClusterMetrics getClusterMetrics() throws IOException {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 4e3e0ac8440..e84eb394040 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -669,7 +669,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
   }
 
   /**
-   * After adding data to the table start a mr job to nnn
+   * After adding data to the table, start a mr job to check the data.
    */
   private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
     LOG.info("Running check");
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 2532e41ef55..c1854d87c19 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -1148,8 +1148,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       }
 
       /**
-       * nn * @return Return new byte array that has <code>ordinal</code> as prefix on front taking
-       * up Bytes.SIZEOF_SHORT bytes followed by <code>r</code>
+       * Returns new byte array that has <code>ordinal</code> as prefix on front taking up
+       * Bytes.SIZEOF_SHORT bytes followed by <code>r</code>
        */
       public static byte[] addPrefixFlag(final int ordinal, final byte[] r) {
         byte[] prefix = Bytes.toBytes((short) ordinal);
@@ -1163,7 +1163,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       }
 
       /**
-       * n * @return Type from the Counts enum of this row. Reads prefix added by
+       * Returns type from the VerifyCounts enum of this row. Reads prefix added by
        * {@link #addPrefixFlag(int, byte[])}
        */
       public static VerifyCounts whichType(final byte[] bs) {
@@ -1171,9 +1171,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
         return VerifyCounts.values()[ordinal];
       }
 
-      /**
-       * n * @return Row bytes minus the type flag.
-       */
+      /** Returns Row bytes minus the type flag. */
       public static byte[] getRowOnly(BytesWritable bw) {
         byte[] bytes = new byte[bw.getLength() - Bytes.SIZEOF_SHORT];
         System.arraycopy(bw.getBytes(), Bytes.SIZEOF_SHORT, bytes, 0, bytes.length);
@@ -1262,7 +1260,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
       /**
        * Dump out extra info around references if there are any. Helps debugging.
-       * @return StringBuilder filled with references if any. n
+       * @return StringBuilder filled with references if any.
        */
       @SuppressWarnings("JavaUtilDate")
       private StringBuilder dumpExtraInfoOnRefs(final BytesWritable key, final Context context,
@@ -1414,8 +1412,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
     }
 
     /**
-     * Verify the values in the Counters against the expected number of entries written. n *
-     * Expected number of referenced entrires n * The Job's Counters object
+     * Verify the values in the Counters against the expected number of entries written, given the
+     * expected number of referenced entries and the Job's Counters object.
      * @return True if the values match what's expected, false otherwise
      */
     protected boolean verifyExpectedValues(long expectedReferenced, Counters counters) {
@@ -1443,7 +1441,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
     /**
      * Verify that the Counters don't contain values which indicate an outright failure from the
-     * Reducers. n * The Job's counters
+     * Reducers, given the Job's counters.
      * @return True if the "bad" counter objects are 0, false otherwise
      */
     protected boolean verifyUnexpectedValues(Counters counters) {
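To make the prefix-flag helpers from the earlier hunks concrete, a hedged sketch; REFERENCED is a hypothetical stand-in for whichever VerifyCounts constant applies:

    byte[] row = Bytes.toBytes("some-row");
    // Bytes.SIZEOF_SHORT ordinal prefix followed by the original row bytes
    byte[] flagged = addPrefixFlag(VerifyCounts.REFERENCED.ordinal(), row);
    VerifyCounts type = whichType(flagged);               // reads the prefix back
    byte[] bare = getRowOnly(new BytesWritable(flagged)); // row minus the flag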
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
index be7af583822..3fb9d4633e1 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
@@ -185,7 +185,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
     /**
      * This tears down any tables that existed from before and rebuilds the tables and schemas on
      * the source cluster. It then sets up replication from the source to the sink cluster by using
-     * the {@link org.apache.hadoop.hbase.client.replication.ReplicationAdmin} connection. n
+     * the {@link org.apache.hadoop.hbase.client.replication.ReplicationAdmin} connection.
      */
     protected void setupTablesAndReplication() throws Exception {
       TableName tableName = getTableName(source.getConfiguration());
@@ -261,7 +261,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
 
     /**
      * Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator} in the
-     * source cluster. This assumes that the tables have been setup via setupTablesAndReplication. n
+     * source cluster. This assumes that the tables have been setup via setupTablesAndReplication.
      */
     protected void runGenerator() throws Exception {
       Path outputPath = new Path(outputDir);
@@ -282,7 +282,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
      * Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Verify} in the sink
      * cluster. If replication is working properly the data written at the source cluster should be
      * available in the sink cluster after a reasonable gap
-     * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster n
+     * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster
      */
     protected void runVerify(long expectedNumNodes) throws Exception {
       Path outputPath = new Path(outputDir);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
index 60e24be5128..63dc0bb28c8 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
@@ -36,9 +36,6 @@ public class Driver {
     pgd = pgd0;
   }
 
-  /**
-   * nn
-   */
   public static void main(String[] args) throws Throwable {
     pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table");
     ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
index 3d609ffd73b..58d8f49839f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
@@ -74,7 +74,7 @@ public class GroupingTableMap extends MapReduceBase
 
   /**
    * Extract the grouping columns from value to construct a new key. Pass the new key and value to
-   * reduce. If any of the grouping columns are not found in the value, the record is skipped. nnnnn
+   * reduce. If any of the grouping columns are not found in the value, the record is skipped.
    */
   public void map(ImmutableBytesWritable key, Result value,
     OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter) throws IOException {
@@ -88,8 +88,8 @@ public class GroupingTableMap extends MapReduceBase
 
   /**
    * Extract columns values from the current record. This method returns null if any of the columns
-   * are not found. Override this method if you want to deal with nulls differently. n * @return
-   * array of byte values
+   * are not found. Override this method if you want to deal with nulls differently.
+   * @return array of byte values
    */
   protected byte[][] extractKeyValues(Result r) {
     byte[][] keyVals = null;
@@ -115,8 +115,8 @@ public class GroupingTableMap extends MapReduceBase
 
   /**
    * Create a key by concatenating multiple column values. Override this function in order to
-   * produce different types of keys. n * @return key generated by concatenating multiple column
-   * values
+   * produce different types of keys.
+   * @return key generated by concatenating multiple column values
    */
   protected ImmutableBytesWritable createGroupKey(byte[][] vals) {
     if (vals == null) {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
index 16256942d72..8af0b4b4749 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
@@ -53,7 +53,7 @@ public class IdentityTableMap extends MapReduceBase
   }
 
   /**
-   * Pass the key, value to reduce nnnnn
+   * Pass the key, value to reduce
    */
   public void map(ImmutableBytesWritable key, Result value,
     OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
index 79d5f3dc8c0..29f9478da10 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
@@ -38,7 +38,7 @@ public class IdentityTableReduce extends MapReduceBase
   private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReduce.class.getName());
 
   /**
-   * No aggregation, output pairs of (key, record) nnnnn
+   * No aggregation, output pairs of (key, record)
    */
   public void reduce(ImmutableBytesWritable key, Iterator<Put> values,
     OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
index 24e9da0f28d..0e9f0deaf67 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
@@ -105,7 +105,6 @@ public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat
    * restoreDir. Sets:
    * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#RESTORE_DIRS_KEY},
    * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#SNAPSHOT_TO_SCANS_KEY}
-   * nnnn
    */
   public static void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans,
     Path restoreDir) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
index 4f95950589c..2f6324a7ac5 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
@@ -65,9 +65,7 @@ public class RowCounter extends Configured implements Tool {
     }
   }
 
-  /**
-   * n * @return the JobConf n
-   */
+  /** Returns the JobConf */
   public JobConf createSubmittableJob(String[] args) throws IOException {
     JobConf c = new JobConf(getConf(), getClass());
     c.setJobName(NAME);
@@ -104,9 +102,6 @@ public class RowCounter extends Configured implements Tool {
     return 0;
   }
 
-  /**
-   * nn
-   */
   public static void main(String[] args) throws Exception {
     int errCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(), args);
     System.exit(errCode);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index 34736bd6a3d..667629016d3 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -210,7 +210,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
   /**
    * Allows subclasses to initialize the table information.
    * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
-   * @param tableName  The {@link TableName} of the table to process. n
+   * @param tableName  The {@link TableName} of the table to process.
    */
   protected void initializeTable(Connection connection, TableName tableName) throws IOException {
     if (this.table != null || this.connection != null) {
@@ -240,7 +240,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
   }
 
   /**
-   * Allows subclasses to set the {@link TableRecordReader}. n * to provide other
+   * Allows subclasses to set the {@link TableRecordReader}, to provide other
    * {@link TableRecordReader} implementations.
    */
   protected void setTableRecordReader(TableRecordReader tableRecordReader) {
@@ -248,7 +248,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
   }
 
   /**
-   * Allows subclasses to set the {@link Filter} to be used. n
+   * Allows subclasses to set the {@link Filter} to be used.
    */
   protected void setRowFilter(Filter rowFilter) {
     this.rowFilter = rowFilter;
@@ -272,7 +272,7 @@ public abstract class TableInputFormatBase implements InputFormat<ImmutableBytes
 
   /**
    * Close the Table and related objects that were initialized via
-   * {@link #initializeTable(Connection, TableName)}. n
+   * {@link #initializeTable(Connection, TableName)}.
    */
   protected void closeTable() throws IOException {
     close(table, connection);
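A hedged subclass sketch of the initializeTable/closeTable lifecycle documented in this file; the configure() hook and the my.input.table key are illustrative assumptions:

    public class MyTableInputFormat extends TableInputFormatBase implements JobConfigurable {
      @Override
      public void configure(JobConf job) {
        try {
          // unmanaged connection, as required; closeTable() closes it for us
          Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
          initializeTable(conn, TableName.valueOf(job.get("my.input.table")));
        } catch (IOException e) {
          throw new RuntimeException("Failed to initialize table", e);
        }
      }
    }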
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
index 270aeb186a4..0f217ad5801 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
@@ -105,7 +105,8 @@ public class TableOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
    * Failure to do so will drop writes.
    * @param ignored Ignored filesystem
    * @param job     Current JobConf
-   * @param name    Name of the job n * @return The newly created writer instance.
+   * @param name    Name of the job
+   * @return The newly created writer instance.
    * @throws IOException When creating the writer fails.
    */
   @Override
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
index e8765c44854..414403534a9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
@@ -34,14 +34,14 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
   private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl();
 
   /**
-   * Restart from survivable exceptions by creating a new scanner. nn
+   * Restart from survivable exceptions by creating a new scanner.
    */
   public void restart(byte[] firstRow) throws IOException {
     this.recordReaderImpl.restart(firstRow);
   }
 
   /**
-   * Build the scanner. Not done in constructor to allow for extension. n
+   * Build the scanner. Not done in constructor to allow for extension.
    */
   public void init() throws IOException {
     this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow());
@@ -82,26 +82,28 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
     this.recordReaderImpl.setRowFilter(rowFilter);
   }
 
+  @Override
   public void close() {
     this.recordReaderImpl.close();
   }
 
   /**
-   * n *
    * @see org.apache.hadoop.mapred.RecordReader#createKey()
    */
+  @Override
   public ImmutableBytesWritable createKey() {
     return this.recordReaderImpl.createKey();
   }
 
   /**
-   * n *
    * @see org.apache.hadoop.mapred.RecordReader#createValue()
    */
+  @Override
   public Result createValue() {
     return this.recordReaderImpl.createValue();
   }
 
+  @Override
   public long getPos() {
 
     // This should be the ordinal tuple in the range;
@@ -109,6 +111,7 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
     return this.recordReaderImpl.getPos();
   }
 
+  @Override
   public float getProgress() {
     // Depends on the total number of tuples and getPos
     return this.recordReaderImpl.getPos();
@@ -117,8 +120,9 @@ public class TableRecordReader implements RecordReader<ImmutableBytesWritable, R
   /**
    * @param key   HStoreKey as input key.
    * @param value MapWritable as input value
-   * @return true if there was more data n
+   * @return true if there was more data
    */
+  @Override
   public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
     return this.recordReaderImpl.next(key, value);
   }
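The methods in this hunk compose into the usual old-API record-reading loop; a minimal sketch, assuming the reader has already been pointed at a table and scan range elsewhere:

    TableRecordReader reader = new TableRecordReader();
    // ... configure the underlying table and columns, then:
    reader.init(); // builds the scanner (deliberately not done in the constructor)
    ImmutableBytesWritable key = reader.createKey();
    Result value = reader.createValue();
    while (reader.next(key, value)) {
      // process the current row
    }
    reader.close();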
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
index 952d60fc883..f1dd14da86a 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
@@ -154,7 +154,6 @@ public class TableRecordReaderImpl {
   }
 
   /**
-   * n *
    * @see org.apache.hadoop.mapred.RecordReader#createKey()
    */
   public ImmutableBytesWritable createKey() {
@@ -162,7 +161,6 @@ public class TableRecordReaderImpl {
   }
 
   /**
-   * n *
    * @see org.apache.hadoop.mapred.RecordReader#createValue()
    */
   public Result createValue() {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
index 2cb63ba7a6a..0bcb559ae3c 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
@@ -43,7 +43,7 @@ public class TableSplit implements InputSplit, Comparable<TableSplit> {
   }
 
   /**
-   * Constructor nnnn
+   * Constructor
    */
   public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) {
     this.m_tableName = tableName;
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
index a48ba49058a..8d12fe5d720 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
@@ -59,7 +59,7 @@ public class CellCreator {
    * @param value     column value
    * @param voffset   value offset
    * @param vlength   value length
-   * @return created Cell n
+   * @return created Cell
    */
   public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
     byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
@@ -83,7 +83,8 @@ public class CellCreator {
    * @param voffset       value offset
    * @param vlength       value length
    * @param visExpression visibility expression to be associated with cell
-   * @return created Cell n * @deprecated since 0.98.9
+   * @return created Cell
+   * @deprecated since 0.98.9
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-10560">HBASE-10560</a>
    */
   @Deprecated
@@ -111,7 +112,8 @@ public class CellCreator {
    * @param timestamp version timestamp
    * @param value     column value
    * @param voffset   value offset
-   * @param vlength   value length n * @return created Cell n
+   * @param vlength   value length
+   * @return created Cell
    */
   public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
     byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index e09e7be98eb..ccbd826c91d 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -330,7 +330,7 @@ public class HashTable extends Configured implements Tool {
     }
 
     /**
-     * Open a TableHash.Reader starting at the first hash at or after the given key. n
+     * Open a TableHash.Reader starting at the first hash at or after the given key.
      */
     public Reader newReader(Configuration conf, ImmutableBytesWritable startKey)
       throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 3b94399cd88..665ff93a977 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -148,7 +148,7 @@ public class ImportTsv extends Configured implements Tool {
 
     /**
      * @param columnsSpecification the list of columns to parser out, comma separated. The row key
-     *                             should be the special token TsvParser.ROWKEY_COLUMN_SPEC n
+     *                             should be the special token TsvParser.ROWKEY_COLUMN_SPEC
      */
     public TsvParser(String columnsSpecification, String separatorStr) {
       // Configure separator
@@ -416,8 +416,8 @@ public class ImportTsv extends Configured implements Tool {
     }
 
     /**
-     * Return starting position and length of row key from the specified line bytes. nn * @return
-     * Pair of row key offset and length. n
+     * Return starting position and length of row key from the specified line bytes.
+     * @return Pair of row key offset and length.
      */
     public Pair<Integer, Integer> parseRowKey(byte[] lineBytes, int length)
       throws BadTsvLineException {
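A short illustration of the parseRowKey contract above, using the constructor from this same file:

    TsvParser parser = new TsvParser("HBASE_ROW_KEY,d:c1", "\t");
    byte[] line = Bytes.toBytes("row1\tvalue1");
    // offset and length of the row key within the line; BadTsvLineException on bad input
    Pair<Integer, Integer> rowKeySpan = parser.parseRowKey(line, line.length);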
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
index fb42e332833..ef3179830f9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
@@ -72,7 +72,7 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 {
    * function will configure the requisite number of reducers to write HFiles for multple tables
    * simultaneously
    * @param job                   See {@link org.apache.hadoop.mapreduce.Job}
-   * @param multiTableDescriptors Table descriptor and region locator pairs n
+   * @param multiTableDescriptors Table descriptor and region locator pairs
    */
   public static void configureIncrementalLoad(Job job, List<TableInfo> multiTableDescriptors)
     throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
index 5a5d1149755..35c12672dea 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
@@ -76,8 +76,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
     boolean useWriteAheadLogging;
 
     /**
-     * n * HBaseConfiguration to used n * whether to use write ahead logging. This can be turned off
-     * ( <tt>false</tt>) to improve performance when bulk loading data.
+     * Takes the HBaseConfiguration to use and whether to use write ahead logging. Write ahead
+     * logging can be turned off (<tt>false</tt>) to improve performance when bulk loading data.
      */
     public MultiTableRecordWriter(Configuration conf, boolean useWriteAheadLogging)
       throws IOException {
@@ -88,8 +88,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
     }
 
     /**
-     * n * the name of the table, as a string
-     * @return the named mutator n * if there is a problem opening a table
+     * Takes the name of the table, as a string.
+     * @return the named mutator
+     * @throws IOException if there is a problem opening a table
      */
     BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException {
       if (this.connection == null) {
@@ -115,8 +115,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
     }
 
     /**
-     * Writes an action (Put or Delete) to the specified table. n * the table being updated. n * the
-     * update, either a put or a delete. n * if the action is not a put or a delete.
+     * Writes an action (Put or Delete) to the specified table, given the table being updated and
+     * the update, either a put or a delete. Throws an exception if the action is not a put or a
+     * delete.
      */
     @Override
     public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
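As a consumer-side sketch of the contract above: in a reducer, the output key names the destination table and the value is the mutation; FAMILY and QUALIFIER are placeholder byte arrays:

    ImmutableBytesWritable table1 = new ImmutableBytesWritable(Bytes.toBytes("table1"));
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes("value"));
    context.write(table1, put); // routed to table1's BufferedMutator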
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
index 93dac05101c..7fdd68c3ad8 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
@@ -78,7 +78,7 @@ public class MultiTableSnapshotInputFormatImpl {
    * Return the list of splits extracted from the scans/snapshots pushed to conf by
    * {@link #setInput(Configuration, Map, Path)}
    * @param conf Configuration to determine splits from
-   * @return Return the list of splits extracted from the scans/snapshots pushed to conf n
+   * @return Return the list of splits extracted from the scans/snapshots pushed to conf
    */
   public List<TableSnapshotInputFormatImpl.InputSplit> getSplits(Configuration conf)
     throws IOException {
@@ -112,7 +112,7 @@ public class MultiTableSnapshotInputFormatImpl {
    * Retrieve the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration by
    * {@link #setSnapshotToScans(Configuration, Map)}
    * @param conf Configuration to extract name -&gt; list&lt;scan&gt; mappings from.
-   * @return the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration n
+   * @return the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration
    */
   public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException {
 
@@ -136,7 +136,7 @@ public class MultiTableSnapshotInputFormatImpl {
   }
 
   /**
-   * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) nnn
+   * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY})
    */
   public void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans)
     throws IOException {
@@ -161,7 +161,7 @@ public class MultiTableSnapshotInputFormatImpl {
    * Retrieve the directories into which snapshots have been restored from
    * ({@link #RESTORE_DIRS_KEY})
    * @param conf Configuration to extract restore directories from
+   * @return the directories into which snapshots have been restored
+   * @return the directories into which snapshots have been restored from
    */
   public Map<String, Path> getSnapshotDirs(Configuration conf) throws IOException {
     List<Map.Entry<String, String>> kvps = ConfigurationUtil.getKeyValues(conf, RESTORE_DIRS_KEY);
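A hedged round-trip sketch of the conf push/pull pair in this file:

    Configuration conf = HBaseConfiguration.create();
    Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
    snapshotScans.put("my_snapshot", Collections.singletonList(new Scan()));
    MultiTableSnapshotInputFormatImpl impl = new MultiTableSnapshotInputFormatImpl();
    impl.setSnapshotToScans(conf, snapshotScans); // stored under SNAPSHOT_TO_SCANS_KEY
    Map<String, Collection<Scan>> restored = impl.getSnapshotsToScans(conf);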
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
index 6258399472d..6d163e82e8c 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
@@ -248,7 +248,7 @@ public class RowCounter extends AbstractHBaseTool {
    * Sets filter {@link FilterBase} to the {@link Scan} instance. If provided rowRangeList contains
    * more than one element, method sets filter which is instance of {@link MultiRowRangeFilter}.
    * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. If rowRangeList
-   * contains exactly one element, startRow and stopRow are set to the scan. nn
+   * contains exactly one element, startRow and stopRow are set on the scan.
    */
   private static void setScanFilter(Scan scan, List<MultiRowRangeFilter.RowRange> rowRangeList) {
     final int size = rowRangeList == null ? 0 : rowRangeList.size();
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index da796e12738..b02517451bc 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -362,8 +362,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
    * @param split A TableSplit corresponding to a range of rowkeys
    * @param n     Number of ranges after splitting. Pass 1 means no split for the range Pass 2 if
    *              you want to split the range in two;
-   * @return A list of TableSplit, the size of the list is n
-   * @throws IllegalArgumentIOException throws IllegalArgumentIOException
+   * @return A list of TableSplit, the size of the list is {@code n}
    */
   protected List<InputSplit> createNInputSplitsUniform(InputSplit split, int n)
     throws IllegalArgumentIOException {
@@ -581,7 +580,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
   /**
    * Allows subclasses to initialize the table information.
    * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
-   * @param tableName  The {@link TableName} of the table to process. n
+   * @param tableName  The {@link TableName} of the table to process.
    */
   protected void initializeTable(Connection connection, TableName tableName) throws IOException {
     if (this.table != null || this.connection != null) {
@@ -642,7 +641,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWri
 
   /**
    * Close the Table and related objects that were initialized via
-   * {@link #initializeTable(Connection, TableName)}. n
+   * {@link #initializeTable(Connection, TableName)}.
    */
   protected void closeTable() throws IOException {
     close(admin, table, regionLocator, connection);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index cf57f0f860d..7027b566026 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -850,7 +850,7 @@ public class TableMapReduceUtil {
    * @param my_class        the class to find.
    * @param fs              the FileSystem with which to qualify the returned path.
    * @param packagedClasses a map of class name to path.
-   * @return a jar file that contains the class. n
+   * @return a jar file that contains the class.
    */
   private static Path findOrCreateJar(Class<?> my_class, FileSystem fs,
     Map<String, String> packagedClasses) throws IOException {
@@ -899,7 +899,7 @@ public class TableMapReduceUtil {
    * that is not the first thing on the class path that has a class with the same name. Looks first
    * on the classpath and then in the <code>packagedClasses</code> map.
    * @param my_class the class to find.
-   * @return a jar file that contains the class, or null. n
+   * @return a jar file that contains the class, or null.
    */
   private static String findContainingJar(Class<?> my_class, Map<String, String> packagedClasses)
     throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index e8316c5016f..17c6c0e4551 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -90,7 +90,8 @@ public class TableOutputFormat<KEY> extends OutputFormat<KEY, Mutation> implemen
     private BufferedMutator mutator;
 
     /**
-     * n *
+     *
      */
     public TableRecordWriter() throws IOException {
       String tableName = conf.get(OUTPUT_TABLE);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
index a0df98796b4..6b22ad1bb0f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
@@ -70,7 +70,8 @@ public class TableRecordReader extends RecordReader<ImmutableBytesWritable, Resu
 
   /**
    * Returns the current key.
-   * @return The current key. n * @throws InterruptedException When the job is aborted.
+   * @return The current key.
+   * @throws InterruptedException When the job is aborted.
    * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey()
    */
   @Override
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
index 79dfe752be0..2fba0197858 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
@@ -92,7 +92,7 @@ public class TextSortReducer
    * Handles initializing this class with objects specific to it (i.e., the parser). Common
    * initialization that might be leveraged by a subsclass is done in <code>doSetup</code>. Hence a
    * subclass may choose to override this method and call <code>doSetup</code> as well before
-   * handling it's own custom params. n
+   * handling its own custom params.
    */
   @Override
   protected void setup(Context context) {
@@ -107,7 +107,7 @@ public class TextSortReducer
   }
 
   /**
-   * Handles common parameter initialization that a subclass might want to leverage. nn
+   * Handles common parameter initialization that a subclass might want to leverage.
    */
   protected void doSetup(Context context, Configuration conf) {
     // If a custom separator has been used,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index 04c7e87d3b4..fe3077fcf22 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -93,7 +93,7 @@ public class TsvImporterMapper extends Mapper<LongWritable, Text, ImmutableBytes
    * Handles initializing this class with objects specific to it (i.e., the parser). Common
    * initialization that might be leveraged by a subsclass is done in <code>doSetup</code>. Hence a
    * subclass may choose to override this method and call <code>doSetup</code> as well before
-   * handling it's own custom params. n
+   * handling its own custom params.
    */
   @Override
   protected void setup(Context context) {
@@ -109,7 +109,7 @@ public class TsvImporterMapper extends Mapper<LongWritable, Text, ImmutableBytes
   }
 
   /**
-   * Handles common parameter initialization that a subclass might want to leverage. n
+   * Handles common parameter initialization that a subclass might want to leverage.
    */
   protected void doSetup(Context context) {
     Configuration conf = context.getConfiguration();
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
index 3ee760af74d..87ef096ad9e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
@@ -61,7 +61,7 @@ public class TsvImporterTextMapper
    * Handles initializing this class with objects specific to it (i.e., the parser). Common
    * initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
    * subclass may choose to override this method and call <code>doSetup</code> as well before
-   * handling it's own custom params. n
+   * handling its own custom params.
    */
   @Override
   protected void setup(Context context) {
@@ -76,7 +76,7 @@ public class TsvImporterTextMapper
   }
 
   /**
-   * Handles common parameter initialization that a subclass might want to leverage. n
+   * Handles common parameter initialization that a subclass might want to leverage.
    */
   protected void doSetup(Context context) {
     Configuration conf = context.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index ee36048cb3e..83d6d2f7cba 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -262,7 +262,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   interface Status {
     /**
      * Sets status
-     * @param msg status message n
+     * @param msg status message
      */
     void setStatus(final String msg) throws IOException;
   }
@@ -540,7 +540,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   /*
    * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write
    * out an input file with instruction per client regards which row they are to start on.
-   * @param cmd Command to run. n
+   * @param cmd Command to run.
    */
   static Job doMapReduce(TestOptions opts, final Configuration conf)
     throws IOException, InterruptedException, ClassNotFoundException {
@@ -591,7 +591,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   /*
    * Write input file of offsets-per-client for the mapreduce job.
    * @param c Configuration
-   * @return Directory that contains file written whose name is JOB_INPUT_FILENAME n
+   * @return Directory that contains file written whose name is JOB_INPUT_FILENAME
    */
   static Path writeInputFile(final Configuration c, final TestOptions opts) throws IOException {
     return writeInputFile(c, opts, new Path("."));
@@ -1345,7 +1345,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     /*
      * Run test
-     * @return Elapsed time. n
+     * @return Elapsed time.
      */
     long test() throws IOException, InterruptedException {
       testSetup();
@@ -2440,8 +2440,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   /*
-   * Format passed integer. n * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version
-   * of passed number (Does absolute in case number is negative).
+   * Format passed integer.
+   * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed number (Does
+   * absolute in case number is negative).
    */
   public static byte[] format(final int number) {
     byte[] b = new byte[ROW_LENGTH];
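Concretely, for the formatting contract above (ROW_LENGTH of 26 is an assumption about this class's constant):

    byte[] a = PerformanceEvaluation.format(123);  // "00000000000000000000000123"
    byte[] b = PerformanceEvaluation.format(-123); // same bytes: absolute value is used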
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
index f9891067e6b..40a785abfb8 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
@@ -110,15 +110,15 @@ public class TestTableInputFormat {
   /**
    * Setup a table with two rows and values.
    * @param tableName the name of the table to create
-   * @return A Table instance for the created table. n
+   * @return A Table instance for the created table.
    */
   public static Table createTable(byte[] tableName) throws IOException {
     return createTable(tableName, new byte[][] { FAMILY });
   }
 
   /**
-   * Setup a table with two rows and values per column family. n * @return A Table instance for the
-   * created table. n
+   * Setup a table with two rows and values per column family.
+   * @return A Table instance for the created table.
    */
   public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
     Table table = UTIL.createTable(TableName.valueOf(tableName), families);
@@ -153,7 +153,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. nn
+   * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API.
    */
   static void runTestMapred(Table table) throws IOException {
     org.apache.hadoop.hbase.mapred.TableRecordReader trr =
@@ -181,7 +181,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create a table that IOE's on first scanner next call n
+   * Create a table that IOE's on first scanner next call
    */
   static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException {
     // build up a mock scanner stuff to fail the first time
@@ -212,7 +212,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create a table that throws a DoNoRetryIOException on first scanner next call n
+   * Create a table that throws a DoNotRetryIOException on first scanner next call
    */
   static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
     // build up a mock scanner stuff to fail the first time
@@ -245,7 +245,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming no errors using mapred api. n
+   * Run test assuming no errors using mapred api.
    */
   @Test
   public void testTableRecordReader() throws IOException {
@@ -254,7 +254,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming Scanner IOException failure using mapred api, n
+   * Run test assuming Scanner IOException failure using mapred api.
    */
   @Test
   public void testTableRecordReaderScannerFail() throws IOException {
@@ -263,7 +263,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming Scanner IOException failure using mapred api, n
+   * Run test assuming Scanner IOException failure using mapred api.
    */
   @Test(expected = IOException.class)
   public void testTableRecordReaderScannerFailTwice() throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
index 285b3339373..a9d9c4974f2 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
@@ -200,7 +200,7 @@ public abstract class MultiTableInputFormatTestBase {
   }
 
   /**
-   * Tests a MR scan using specific start and stop rows. nnn
+   * Tests a MR scan using specific start and stop rows.
    */
   private void testScan(String start, String stop, String last)
     throws IOException, InterruptedException, ClassNotFoundException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
index b4e1b91359a..9b207158817 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
@@ -776,7 +776,7 @@ public class TestCellBasedHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#configureCompression(Configuration, HTableDescriptor)} and
    * {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the
-   * compression map is correctly serialized into and deserialized from configuration n
+   * compression map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -839,7 +839,7 @@ public class TestCellBasedHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#configureBloomType(HTableDescriptor, Configuration)} and
    * {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the compression
-   * map is correctly serialized into and deserialized from configuration n
+   * map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -899,7 +899,7 @@ public class TestCellBasedHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#configureBlockSize(HTableDescriptor, Configuration)} and
    * {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the compression
-   * map is correctly serialized into and deserialized from configuration n
+   * map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -962,7 +962,7 @@ public class TestCellBasedHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTableDescriptor, Configuration)}
    * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that the
-   * compression map is correctly serialized into and deserialized from configuration n
+   * compression map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
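
All four round-trips above follow the same pattern; sketched here for compression. A hedged sketch: the family name and algorithm are illustrative, and the two HFileOutputFormat2 methods are the package-private test hooks named in the javadoc.

    Configuration conf = new Configuration();
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("t"));
    HColumnDescriptor family = new HColumnDescriptor("f");
    family.setCompressionType(Compression.Algorithm.GZ);
    desc.addFamily(family);
    // serialize per-family compression choices into the job configuration ...
    HFileOutputFormat2.configureCompression(conf, desc);
    // ... then read them back the way the writer would
    Map<byte[], Compression.Algorithm> map = HFileOutputFormat2.createFamilyCompressionMap(conf);
    assertEquals(Compression.Algorithm.GZ, map.get(Bytes.toBytes("f")));
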
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java
index ac704788b42..c9fb941a202 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java
@@ -154,8 +154,8 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Runs an export job with the specified command line args n * @return true if job completed
-   * successfully nnn
+   * Runs an export job with the specified command line args
+   * @return true if job completed successfully
    */
   protected boolean runExport(String[] args) throws Throwable {
     // need to make a copy of the configuration to make sure different temp dirs are used.
@@ -168,8 +168,8 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Runs an import job with the specified command line args n * @return true if job completed
-   * successfully nnn
+   * Runs an import job with the specified command line args
+   * @return true if job completed successfully
    */
   boolean runImport(String[] args) throws Throwable {
     // need to make a copy of the configuration to make sure different temp dirs are used.
@@ -178,7 +178,7 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Test simple replication case with column mapping n
+   * Test simple replication case with column mapping
    */
   @Test
   public void testSimpleCase() throws Throwable {
@@ -230,7 +230,7 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Test export hbase:meta table n
+   * Test export hbase:meta table
    */
   @Test
   public void testMetaExport() throws Throwable {
@@ -240,7 +240,7 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Test import data from 0.94 exported file n
+   * Test import data from 0.94 exported file
    */
   @Test
   public void testImport94Table() throws Throwable {
@@ -489,7 +489,7 @@ public class TestCellBasedImportExport2 {
   }
 
   /**
-   * Count the number of keyvalues in the specified table for the given timerange nnn
+   * Count the number of keyvalues in the specified table with the given filter
    */
   private int getCount(Table table, Filter filter) throws IOException {
     Scan scan = new Scan();
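
The export/import helpers above take standard Export and Import tool arguments, e.g. (a sketch; the table and directory names are illustrative):

    // Export usage: <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]
    assertTrue(runExport(new String[] { "exportTable", "/tmp/export-out" }));
    // Import usage: <tablename> <inputdir>
    assertTrue(runImport(new String[] { "importTable", "/tmp/export-out" }));
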
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java
index 4bbacf04210..283acbabf6e 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java
@@ -107,7 +107,7 @@ public class TestCellBasedWALPlayer2 {
   }
 
   /**
-   * Simple end-to-end test n
+   * Simple end-to-end test
    */
   @Test
   public void testWALPlayer() throws Exception {
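
The test drives WALPlayer as a standard Hadoop Tool, along these lines (a sketch; the WAL directory and table name are illustrative):

    // replay edits from a WAL directory into the named table
    int exit = ToolRunner.run(conf, new WALPlayer(),
      new String[] { "/tmp/wals", "tableToReplay" });
    assertEquals(0, exit);
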
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
index 40e9d19c5d4..8a811e6d654 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
@@ -180,7 +180,7 @@ public class TestCopyTable {
   }
 
   /**
-   * Simple end-to-end test n
+   * Simple end-to-end test
    */
   @Test
   public void testCopyTable() throws Exception {
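
Under the hood this runs the CopyTable tool with a target-table option, roughly (a sketch; table names are illustrative):

    String[] args = new String[] { "--new.name=targetTable", "sourceTable" };
    assertEquals(0, ToolRunner.run(conf, new CopyTable(), args));
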
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 3c3bf0600d7..99b27f05630 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -810,7 +810,7 @@ public class TestHFileOutputFormat2 {
 
   /**
    * Test for {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the
-   * family compression map is correctly serialized into and deserialized from configuration n
+   * family compression map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -872,7 +872,7 @@ public class TestHFileOutputFormat2 {
 
   /**
    * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the
-   * family bloom type map is correctly serialized into and deserialized from configuration n
+   * family bloom type map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -931,7 +931,7 @@ public class TestHFileOutputFormat2 {
 
   /**
    * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the
-   * family block size map is correctly serialized into and deserialized from configuration n
+   * family block size map is correctly serialized into and deserialized from configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
@@ -994,7 +994,7 @@ public class TestHFileOutputFormat2 {
   /**
    * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that
    * the family data block encoding map is correctly serialized into and deserialized from
-   * configuration n
+   * configuration
    */
   @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
   @Test
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 77aadf561e8..842bb3e6edb 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -173,8 +173,8 @@ public class TestImportExport {
   }
 
   /**
-   * Runs an export job with the specified command line args n * @return true if job completed
-   * successfully nnn
+   * Runs an export job with the specified command line args
+   * @return true if job completed successfully
    */
   protected boolean runExport(String[] args) throws Throwable {
     // need to make a copy of the configuration to make sure different temp dirs are used.
@@ -187,8 +187,8 @@ public class TestImportExport {
   }
 
   /**
-   * Runs an import job with the specified command line args n * @return true if job completed
-   * successfully nnn
+   * Runs an import job with the specified command line args
+   * @return true if job completed successfully
    */
   boolean runImport(String[] args) throws Throwable {
     // need to make a copy of the configuration to make sure different temp dirs are used.
@@ -197,7 +197,7 @@ public class TestImportExport {
   }
 
   /**
-   * Test simple replication case with column mapping n
+   * Test simple replication case with column mapping
    */
   @Test
   public void testSimpleCase() throws Throwable {
@@ -249,7 +249,7 @@ public class TestImportExport {
   }
 
   /**
-   * Test export hbase:meta table n
+   * Test export hbase:meta table
    */
   @Test
   public void testMetaExport() throws Throwable {
@@ -259,7 +259,7 @@ public class TestImportExport {
   }
 
   /**
-   * Test import data from 0.94 exported file n
+   * Test import data from 0.94 exported file
    */
   @Test
   public void testImport94Table() throws Throwable {
@@ -510,7 +510,7 @@ public class TestImportExport {
   /**
    * Count the number of keyvalues in the specified table with the given filter
    * @param table the table to scan
-   * @return the number of keyvalues found n
+   * @return the number of keyvalues found
    */
   private int getCount(Table table, Filter filter) throws IOException {
     Scan scan = new Scan();
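
The helper amounts to scanning with the filter applied and summing cells per row, along these lines (a sketch of the idea, not necessarily the exact body):

    Scan scan = new Scan();
    scan.setFilter(filter);
    int count = 0;
    try (ResultScanner results = table.getScanner(scan)) {
      for (Result result : results) {
        count += result.size(); // cells (keyvalues) in this row
      }
    }
    return count;
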
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
index 3ce402accd6..936cf90372b 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
@@ -154,9 +154,9 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
   /**
    * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv
    * <code>Tool</code> instance so that other tests can inspect it for further validation as
-   * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n *
-   * Any arguments to pass BEFORE inputFile path is appended. n * @return The Tool instance used to
-   * run the test.
+   * necessary. This method is static to ensure non-reliance on instance's util/conf facilities.
+   * @param args Any arguments to pass BEFORE inputFile path is appended.
+   * @return The Tool instance used to run the test.
    */
   private Tool doMROnTableTest(HBaseTestingUtility util, String family, String data, String[] args,
     int valueMultiplier, boolean dataAvailable) throws Exception {
@@ -193,7 +193,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
   }
 
   /**
-   * Confirm ImportTsv via data in online table. n
+   * Confirm ImportTsv via data in online table.
    */
   private static void validateTable(Configuration conf, TableName tableName, String family,
     int valueMultiplier, boolean dataAvailable) throws IOException {
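
A typical args array for these ImportTsv runs looks like the following (a sketch; the column spec, table name, and input path are illustrative):

    String[] args = new String[] {
      // map TSV fields to the row key plus two qualifiers in the test family
      "-Dimporttsv.columns=HBASE_ROW_KEY,FAM:A,FAM:B",
      "testTable", "/tmp/input.tsv" };
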
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
index f4281e6b708..2009b5c4bca 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
@@ -315,8 +315,8 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
   /**
    * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv
    * <code>Tool</code> instance so that other tests can inspect it for further validation as
-   * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n *
-   * Any arguments to pass BEFORE inputFile path is appended.
+   * necessary. This method is static to ensure non-reliance on instance's util/conf facilities.
+   * @param args Any arguments to pass BEFORE inputFile path is appended.
    * @param expectedKVCount Expected KV count. pass -1 to skip the kvcount check
    * @return The Tool instance used to run the test.
    */
@@ -460,7 +460,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
    * Method returns the total KVs in given hfile
    * @param fs File System
    * @param p  HFile path
-   * @return KV count in the given hfile n
+   * @return KV count in the given hfile
    */
   private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
     Configuration conf = util.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
index 51196d95370..737ae178b63 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
@@ -540,7 +540,7 @@ public class TestImportTsv implements Configurable {
    * Method returns the total KVs in given hfile
    * @param fs File System
    * @param p  HFile path
-   * @return KV count in the given hfile n
+   * @return KV count in the given hfile
    */
   private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
     Configuration conf = util.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
index 4ff1d1acad0..32da37f1f4f 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
@@ -79,7 +79,7 @@ public class TestMultiTableInputFormatBase {
   /**
    * Test getSplits only puts up one Connection. In the past it has put up many Connections. Each
    * Connection setup comes with a fresh new cache so we have to do a fresh hit on hbase:meta. Should
-   * only do one Connection when doing getSplits even if a MultiTableInputFormat. n
+   * only do one Connection when doing getSplits even if a MultiTableInputFormat.
    */
   @Test
   public void testMRSplitsConnectionCount() throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
index 647f243aed0..3c8e5b6f16c 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
@@ -93,7 +93,7 @@ public class TestMultithreadedTableMapper {
   public static class ProcessContentsMapper extends TableMapper<ImmutableBytesWritable, Put> {
 
     /**
-     * Pass the key, and reversed value to reduce nnnn
+     * Pass the key and the reversed value to reduce.
      */
     @Override
     public void map(ImmutableBytesWritable key, Result value, Context context)
@@ -118,7 +118,7 @@ public class TestMultithreadedTableMapper {
   }
 
   /**
-   * Test multithreadedTableMappper map/reduce against a multi-region table nnn
+   * Test MultithreadedTableMapper map/reduce against a multi-region table
    */
   @Test
   public void testMultithreadedTableMapper()
@@ -184,7 +184,8 @@ public class TestMultithreadedTableMapper {
   /**
    * Looks at every value of the mapreduce output and verifies that indeed the values have been
    * reversed.
-   * @param table Table to scan. n * @throws NullPointerException if we failed to find a cell value
+   * @param table Table to scan.
+   * @throws NullPointerException if we failed to find a cell value
    */
   private void verifyAttempt(final Table table) throws IOException, NullPointerException {
     Scan scan = new Scan();
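
Configuring the mapper under test is a two-call affair on the job (a minimal sketch, assuming a configured Job; the thread count is illustrative):

    // run ProcessContentsMapper instances on a thread pool inside one map task
    MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class);
    MultithreadedTableMapper.setNumberOfThreads(job, 16);
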
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
index 3fcb251392a..4404e3aee87 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
@@ -87,7 +87,7 @@ public class TestRowCounter {
   }
 
   /**
-   * Test a case when no column was specified in command line arguments. n
+   * Test a case when no column was specified in command line arguments.
    */
   @Test
   public void testRowCounterNoColumn() throws Exception {
@@ -96,7 +96,7 @@ public class TestRowCounter {
   }
 
   /**
-   * Test a case when the column specified in command line arguments is exclusive for few rows. n
+   * Test a case when the column specified in command line arguments is exclusive for a few rows.
    */
   @Test
   public void testRowCounterExclusiveColumn() throws Exception {
@@ -106,7 +106,7 @@ public class TestRowCounter {
 
   /**
    * Test a case when the column specified in command line arguments is one for which the qualifier
-   * contains colons. n
+   * contains colons.
    */
   @Test
   public void testRowCounterColumnWithColonInQualifier() throws Exception {
@@ -116,7 +116,7 @@ public class TestRowCounter {
 
   /**
    * Test a case when the column specified in command line arguments is not part of the first KV for a
-   * row. n
+   * row.
    */
   @Test
   public void testRowCounterHiddenColumn() throws Exception {
@@ -126,7 +126,7 @@ public class TestRowCounter {
 
   /**
    * Test a case when the column specified in command line arguments is exclusive for a few rows and
-   * also a row range filter is specified n
+   * also a row range filter is specified.
    */
   @Test
   public void testRowCounterColumnAndRowRange() throws Exception {
@@ -135,7 +135,7 @@ public class TestRowCounter {
   }
 
   /**
-   * Test a case when a range is specified with single range of start-end keys n
+   * Test a case when a range is specified with a single range of start-end keys.
    */
   @Test
   public void testRowCounterRowSingleRange() throws Exception {
@@ -144,7 +144,7 @@ public class TestRowCounter {
   }
 
   /**
-   * Test a case when a range is specified with single range with end key only n
+   * Test a case when a range is specified with a single range with an end key only.
    */
   @Test
   public void testRowCounterRowSingleRangeUpperBound() throws Exception {
@@ -153,7 +153,7 @@ public class TestRowCounter {
   }
 
   /**
-   * Test a case when a range is specified with two ranges where one range is with end key only n
+   * Test a case when a range is specified with two ranges, one of which has an end key only.
    */
   @Test
   public void testRowCounterRowMultiRangeUpperBound() throws Exception {
@@ -162,7 +162,7 @@ public class TestRowCounter {
   }
 
   /**
-   * Test a case when a range is specified with multiple ranges of start-end keys n
+   * Test a case when a range is specified with multiple ranges of start-end keys.
    */
   @Test
   public void testRowCounterRowMultiRange() throws Exception {
@@ -172,7 +172,7 @@ public class TestRowCounter {
 
   /**
    * Test a case when a range is specified with multiple ranges of start-end keys; one range is
-   * filled, another two are not n
+   * filled, the other two are not.
    */
   @Test
   public void testRowCounterRowMultiEmptyRange() throws Exception {
@@ -193,7 +193,7 @@ public class TestRowCounter {
   }
 
   /**
-   * Test a case when the timerange is specified with --starttime and --endtime options n
+   * Test a case when the timerange is specified with --starttime and --endtime options.
    */
   @Test
   public void testRowCounterTimeRange() throws Exception {
@@ -241,7 +241,7 @@ public class TestRowCounter {
   /**
    * Run the RowCounter map reduce job and verify the row count.
    * @param args          the command line arguments to be used for rowcounter job.
-   * @param expectedCount the expected row count (result of map reduce job). n
+   * @param expectedCount the expected row count (result of map reduce job).
    */
   private void runRowCount(String[] args, int expectedCount) throws Exception {
     RowCounter rowCounter = new RowCounter();
@@ -433,7 +433,7 @@ public class TestRowCounter {
 
   /**
    * Writes TOTAL_ROWS distinct rows into the table. Some rows have two columns, some have
-   * one. nn
+   * one.
    */
   private static void writeRows(Table table, int totalRows, int rowsWithOneCol) throws IOException {
     final byte[] family = Bytes.toBytes(COL_FAM);
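
For reference, the option spellings these tests exercise combine as in this hypothetical invocation (table, keys, and the col1 qualifier are illustrative; COL_FAM and runRowCount come from the test):

    // count rows in a key range for one column; a --starttime=/--endtime= pair narrows by time
    runRowCount(new String[] { "testRowCounterTable", "--range=row_a,row_z", COL_FAM + ":col1" },
      expectedCount);
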
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
index 1986200c187..8d29415612f 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
@@ -105,15 +105,16 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Setup a table with two rows and values. n * @return A Table instance for the created table. n
+   * Set up a table with two rows and values.
+   * @return A Table instance for the created table.
    */
   public static Table createTable(byte[] tableName) throws IOException {
     return createTable(tableName, new byte[][] { FAMILY });
   }
 
   /**
-   * Setup a table with two rows and values per column family. n * @return A Table instance for the
-   * created table. n
+   * Set up a table with two rows and values per column family.
+   * @return A Table instance for the created table.
    */
   public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
     Table table = UTIL.createTable(TableName.valueOf(tableName), families);
@@ -148,7 +149,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API. nnn
+   * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API.
    */
   static void runTestMapreduce(Table table) throws IOException, InterruptedException {
     org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr =
@@ -182,7 +183,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create a table that IOE's on first scanner next call n
+   * Create a table that IOE's on first scanner next call
    */
   static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException {
     // build up a mock scanner stuff to fail the first time
@@ -213,7 +214,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Create a table that throws a NotServingRegionException on first scanner next call n
+   * Create a table that throws a NotServingRegionException on first scanner next call
    */
   static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
     // build up a mock scanner stuff to fail the first time
@@ -246,7 +247,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming no errors using newer mapreduce api nn
+   * Run test assuming no errors using newer mapreduce api
    */
   @Test
   public void testTableRecordReaderMapreduce() throws IOException, InterruptedException {
@@ -255,7 +256,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming Scanner IOException failure using newer mapreduce api nn
+   * Run test assuming Scanner IOException failure using newer mapreduce api
    */
   @Test
   public void testTableRecordReaderScannerFailMapreduce() throws IOException, InterruptedException {
@@ -264,7 +265,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming Scanner IOException failure using newer mapreduce api nn
+   * Run test assuming Scanner IOException failure using newer mapreduce api
    */
   @Test(expected = IOException.class)
   public void testTableRecordReaderScannerFailMapreduceTwice()
@@ -274,8 +275,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming NotServingRegionException using newer mapreduce api n * @throws
-   * org.apache.hadoop.hbase.DoNotRetryIOException
+   * Run test assuming NotServingRegionException using newer mapreduce api
    */
   @Test
   public void testTableRecordReaderScannerTimeoutMapreduce()
@@ -285,8 +285,7 @@ public class TestTableInputFormat {
   }
 
   /**
-   * Run test assuming NotServingRegionException using newer mapreduce api n * @throws
-   * org.apache.hadoop.hbase.NotServingRegionException
+   * Run test assuming NotServingRegionException using newer mapreduce api
    */
   @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class)
   public void testTableRecordReaderScannerTimeoutMapreduceTwice()
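
The mock-scanner construction both create*ScannerTable helpers rely on can be sketched with Mockito as below; illustrative only, and realScanner stands in for the delegate scanner the real helpers wrap so later calls succeed.

    // first next() throws, subsequent calls fall through to a real scanner
    ResultScanner mockScanner = mock(ResultScanner.class);
    when(mockScanner.next())
      .thenThrow(new IOException("injected failure on first next()"))
      .thenAnswer(invocation -> realScanner.next());
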
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
index e1bd1626870..99606050667 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
@@ -76,7 +76,7 @@ public class TestTableMapReduce extends TestTableMapReduceBase {
   static class ProcessContentsMapper extends TableMapper<ImmutableBytesWritable, Put> {
 
     /**
-     * Pass the key, and reversed value to reduce nnnn
+     * Pass the key and the reversed value to reduce.
      */
     @Override
     public void map(ImmutableBytesWritable key, Result value, Context context)
@@ -136,7 +136,7 @@ public class TestTableMapReduce extends TestTableMapReduceBase {
   }
 
   /**
-   * Verify scan counters are emitted from the job nn
+   * Verify scan counters are emitted from the job
    */
   private void verifyJobCountersAreEmitted(Job job) throws IOException {
     Counters counters = job.getCounters();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
index 536aa5d4cbc..3ff8bafea48 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
@@ -83,7 +83,7 @@ public abstract class TestTableMapReduceBase {
   }
 
   /**
-   * Test a map/reduce against a multi-region table n
+   * Test a map/reduce against a multi-region table
    */
   @Test
   public void testMultiRegionTable() throws IOException {
@@ -152,7 +152,8 @@ public abstract class TestTableMapReduceBase {
   /**
    * Looks at every value of the mapreduce output and verifies that indeed the values have been
    * reversed.
-   * @param table Table to scan. n * @throws NullPointerException if we failed to find a cell value
+   * @param table Table to scan.
+   * @throws NullPointerException if we failed to find a cell value
    */
   private void verifyAttempt(final Table table) throws IOException, NullPointerException {
     Scan scan = new Scan();
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
index f11c97c1952..0813d8fe5f8 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -144,8 +144,7 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool {
 
   /**
    * Pass one or more log file names and formatting options and it will dump out a text version of
-   * the contents on <code>stdout</code>. n * Command line arguments n * Thrown upon file system
-   * errors etc.
+   * the contents on <code>stdout</code>.
+   * @param args command line arguments
+   * @throws IOException thrown upon file system errors etc.
    */
   @Override
   public int run(final String[] args) throws IOException {
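
Since the printer extends Configured and implements Tool, it is typically launched through ToolRunner (a sketch; the WAL path is illustrative and the -f option is an assumption about the tool's flag parsing):

    int exit = ToolRunner.run(HBaseConfiguration.create(), new ProcedureWALPrettyPrinter(),
      new String[] { "-f", "/hbase/MasterProcWALs/pv2-00000000000000000001.log" });
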
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index f852066ed90..72bfe5daa55 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -65,7 +65,7 @@ public class ReplicationPeers {
    * Method called after a peer has been connected. It will create a ReplicationPeer to track the
    * newly connected cluster.
    * @param peerId a short name that identifies the cluster
-   * @return whether a ReplicationPeer was successfully created n
+   * @return whether a ReplicationPeer was successfully created
    */
   public boolean addPeer(String peerId) throws ReplicationException {
     if (this.peerCache.containsKey(peerId)) {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
index 61dede2ae83..47852f4df2b 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
@@ -41,7 +41,7 @@ public class ExistsResource extends ResourceBase {
   TableResource tableResource;
 
   /**
-   * Constructor nn
+   * Constructor
    */
   public ExistsResource(TableResource tableResource) throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 68d774e420c..cc5fb22265c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -44,7 +44,7 @@ public class MultiRowResource extends ResourceBase implements Constants {
   String[] columns = null;
 
   /**
-   * Constructor nn * @throws java.io.IOException
+   * Constructor
    */
   public MultiRowResource(TableResource tableResource, String versions, String columnsStr)
     throws IOException {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
index b661e46f928..355dac23c44 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
@@ -60,14 +60,14 @@ public class NamespacesInstanceResource extends ResourceBase {
   boolean queryTables = false;
 
   /**
-   * Constructor for standard NamespaceInstanceResource. n
+   * Constructor for standard NamespaceInstanceResource.
    */
   public NamespacesInstanceResource(String namespace) throws IOException {
     this(namespace, false);
   }
 
   /**
-   * Constructor for querying namespace table list via NamespaceInstanceResource. n
+   * Constructor for querying namespace table list via NamespaceInstanceResource.
    */
   public NamespacesInstanceResource(String namespace, boolean queryTables) throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
index a3c0e2d2f1a..aeccda24f19 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
@@ -44,7 +44,7 @@ public class NamespacesResource extends ResourceBase {
   private static final Logger LOG = LoggerFactory.getLogger(NamespacesResource.class);
 
   /**
-   * Constructor n
+   * Constructor
    */
   public NamespacesResource() throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
index 39a7ba71dd6..2e01ff24d47 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
@@ -32,7 +32,7 @@ public interface ProtobufMessageHandler {
   /**
    * Initialize the model from a protobuf representation.
    * @param message the raw bytes of the protobuf message
-   * @return reference to self for convenience n
+   * @return reference to self for convenience
    */
   ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException;
 }
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
index 79760aead9d..7212993fb8d 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
@@ -90,7 +90,7 @@ public class RESTServlet implements Constants {
   /**
    * Constructor with existing configuration
    * @param conf         existing configuration
-   * @param userProvider the login user provider n
+   * @param userProvider the login user provider
    */
   RESTServlet(final Configuration conf, final UserProvider userProvider) throws IOException {
     this.realUser = userProvider.getCurrent().getUGI();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
index 21c97302603..17beae40f7b 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
@@ -55,7 +55,7 @@ public class RegionsResource extends ResourceBase {
   TableResource tableResource;
 
   /**
-   * Constructor nn
+   * Constructor
    */
   public RegionsResource(TableResource tableResource) throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
index 9baf7aa7c04..babb3d1152c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
@@ -48,7 +48,7 @@ public class RootResource extends ResourceBase {
   }
 
   /**
-   * Constructor n
+   * Constructor
    */
   public RootResource() throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index 16259c34167..df4664b76c5 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -69,7 +69,7 @@ public class RowResource extends ResourceBase {
   private boolean returnResult = false;
 
   /**
-   * Constructor nnnnnn
+   * Constructor
    */
   public RowResource(TableResource tableResource, String rowspec, String versions, String check,
     String returnResult) throws IOException {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
index 1c2929aab7a..d15537b1c87 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
@@ -52,7 +52,7 @@ public class ScannerResource extends ResourceBase {
   TableResource tableResource;
 
   /**
-   * Constructor nn
+   * Constructor
    */
   public ScannerResource(TableResource tableResource) throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
index ee15cecccd3..8ead5a6f464 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
@@ -61,7 +61,7 @@ public class SchemaResource extends ResourceBase {
   TableResource tableResource;
 
   /**
-   * Constructor nn
+   * Constructor
    */
   public SchemaResource(TableResource tableResource) throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
index 85b3b3f6556..fe07c93c8a9 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -51,7 +51,7 @@ public class StorageClusterStatusResource extends ResourceBase {
   }
 
   /**
-   * Constructor n
+   * Constructor
    */
   public StorageClusterStatusResource() throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
index ea7641e54cd..00c243aec72 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -45,7 +45,7 @@ public class StorageClusterVersionResource extends ResourceBase {
   }
 
   /**
-   * Constructor n
+   * Constructor
    */
   public StorageClusterVersionResource() throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
index c40165c8254..24ced31d36a 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
@@ -46,7 +46,7 @@ public class TableResource extends ResourceBase {
   private static final Logger LOG = LoggerFactory.getLogger(TableResource.class);
 
   /**
-   * Constructor nn
+   * Constructor
    */
   public TableResource(String table) throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
index 8b71f708645..d78ba90cd8c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
@@ -53,7 +53,7 @@ public class VersionResource extends ResourceBase {
   }
 
   /**
-   * Constructor n
+   * Constructor
    */
   public VersionResource() throws IOException {
     super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
index 85cb2af86a8..3f406fb5d92 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
@@ -255,7 +255,7 @@ public class Client {
    * @param method  the transaction method
    * @param headers HTTP header values to send
    * @param path    the properly urlencoded path
-   * @return the HTTP response code n
+   * @return the HTTP response code
    */
   public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers,
     String path) throws IOException {
@@ -309,7 +309,7 @@ public class Client {
    * @param method  the transaction method
    * @param headers HTTP header values to send
    * @param uri     a properly urlencoded URI
-   * @return the HTTP response code n
+   * @return the HTTP response code
    */
   public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri)
     throws IOException {
@@ -348,7 +348,7 @@ public class Client {
    * @param method  the HTTP method
    * @param headers HTTP header values to send
    * @param path    the properly urlencoded path or URI
-   * @return the HTTP response code n
+   * @return the HTTP response code
    */
   public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path)
     throws IOException {
@@ -407,7 +407,7 @@ public class Client {
   /**
    * Send a HEAD request
    * @param path the path or URI
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response head(String path) throws IOException {
     return head(cluster, path, null);
@@ -418,7 +418,7 @@ public class Client {
    * @param cluster the cluster definition
    * @param path    the path or URI
    * @param headers the HTTP headers to include in the request
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response head(Cluster cluster, String path, Header[] headers) throws IOException {
     HttpHead method = new HttpHead(path);
@@ -433,7 +433,7 @@ public class Client {
   /**
    * Send a GET request
    * @param path the path or URI
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response get(String path) throws IOException {
     return get(cluster, path);
@@ -443,7 +443,7 @@ public class Client {
    * Send a GET request
    * @param cluster the cluster definition
    * @param path    the path or URI
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response get(Cluster cluster, String path) throws IOException {
     return get(cluster, path, EMPTY_HEADER_ARRAY);
@@ -453,7 +453,7 @@ public class Client {
    * Send a GET request
    * @param path   the path or URI
    * @param accept Accept header value
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response get(String path, String accept) throws IOException {
     return get(cluster, path, accept);
@@ -464,7 +464,7 @@ public class Client {
    * @param cluster the cluster definition
    * @param path    the path or URI
    * @param accept  Accept header value
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response get(Cluster cluster, String path, String accept) throws IOException {
     Header[] headers = new Header[1];
@@ -476,7 +476,7 @@ public class Client {
    * Send a GET request
    * @param path    the path or URI
    * @param headers the HTTP headers to include in the request, <tt>Accept</tt> must be supplied
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response get(String path, Header[] headers) throws IOException {
     return get(cluster, path, headers);
@@ -522,7 +522,7 @@ public class Client {
    * @param c       the cluster definition
    * @param path    the path or URI
    * @param headers the HTTP headers to include in the request
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response get(Cluster c, String path, Header[] headers) throws IOException {
     if (httpGet != null) {
@@ -539,7 +539,7 @@ public class Client {
    * @param path        the path or URI
    * @param contentType the content MIME type
    * @param content     the content bytes
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response put(String path, String contentType, byte[] content) throws IOException {
     return put(cluster, path, contentType, content);
@@ -551,7 +551,7 @@ public class Client {
    * @param contentType the content MIME type
    * @param content     the content bytes
    * @param extraHdr    extra Header to send
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response put(String path, String contentType, byte[] content, Header extraHdr)
     throws IOException {
@@ -600,7 +600,7 @@ public class Client {
    * @param path    the path or URI
    * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
    * @param content the content bytes
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response put(String path, Header[] headers, byte[] content) throws IOException {
     return put(cluster, path, headers, content);
@@ -612,7 +612,7 @@ public class Client {
    * @param path    the path or URI
    * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
    * @param content the content bytes
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response put(Cluster cluster, String path, Header[] headers, byte[] content)
     throws IOException {
@@ -633,7 +633,7 @@ public class Client {
    * @param path        the path or URI
    * @param contentType the content MIME type
    * @param content     the content bytes
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response post(String path, String contentType, byte[] content) throws IOException {
     return post(cluster, path, contentType, content);
@@ -645,7 +645,7 @@ public class Client {
    * @param contentType the content MIME type
    * @param content     the content bytes
    * @param extraHdr    additional Header to send
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response post(String path, String contentType, byte[] content, Header extraHdr)
     throws IOException {
@@ -694,7 +694,7 @@ public class Client {
    * @param path    the path or URI
    * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
    * @param content the content bytes
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response post(String path, Header[] headers, byte[] content) throws IOException {
     return post(cluster, path, headers, content);
@@ -706,7 +706,7 @@ public class Client {
    * @param path    the path or URI
    * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
    * @param content the content bytes
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response post(Cluster cluster, String path, Header[] headers, byte[] content)
     throws IOException {
@@ -725,7 +725,7 @@ public class Client {
   /**
    * Send a DELETE request
    * @param path the path or URI
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response delete(String path) throws IOException {
     return delete(cluster, path);
@@ -735,7 +735,7 @@ public class Client {
    * Send a DELETE request
    * @param path     the path or URI
    * @param extraHdr additional Header to send
-   * @return a Response object with response detail n
+   * @return a Response object with response detail
    */
   public Response delete(String path, Header extraHdr) throws IOException {
     return delete(cluster, path, extraHdr);
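
Taken together, the methods above form a small HTTP client surface; typical use looks like this (a sketch; host, port, paths, and payload are illustrative):

    Client client = new Client(new Cluster().add("localhost", 8080));
    // GET with an explicit Accept header
    Response version = client.get("/version/rest", "application/json");
    // PUT raw content under a MIME type
    Response putResp = client.put("/t1/schema", "text/xml", Bytes.toBytes("<TableSchema/>"));
    // DELETE a resource
    Response del = client.delete("/t1/schema");
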
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
index 47e67dbea5a..9071c31614c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
@@ -139,7 +139,7 @@ public class RestCsrfPreventionFilter implements Filter {
     String getHeader(String header);
 
     /**
-     * Returns the method. n
+     * Returns the method.
      */
     String getMethod();
 
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
index 6e2391546bb..349d2a2c328 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
@@ -80,21 +80,21 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
   }
 
   /**
-   * Constructor nn
+   * Constructor
    */
   public CellModel(byte[] column, byte[] value) {
     this(column, HConstants.LATEST_TIMESTAMP, value);
   }
 
   /**
-   * Constructor nnn
+   * Constructor
    */
   public CellModel(byte[] column, byte[] qualifier, byte[] value) {
     this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
   }
 
   /**
-   * Constructor from KeyValue n
+   * Constructor from KeyValue
    */
   public CellModel(org.apache.hadoop.hbase.Cell cell) {
     this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
@@ -102,7 +102,7 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
   }
 
   /**
-   * Constructor nnn
+   * Constructor
    */
   public CellModel(byte[] column, long timestamp, byte[] value) {
     this.column = column;
@@ -111,7 +111,7 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
   }
 
   /**
-   * Constructor nnnn
+   * Constructor
    */
   public CellModel(byte[] column, byte[] qualifier, long timestamp, byte[] value) {
     this.column = CellUtil.makeColumn(column, qualifier);
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
index 475f9185976..39ea0b7c39b 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
@@ -63,7 +63,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan
 
   /**
    * Constructor to use if namespace does not exist in HBASE.
-   * @param namespaceName the namespace name. n
+   * @param namespaceName the namespace name.
    */
   public NamespacesInstanceModel(String namespaceName) throws IOException {
     this(null, namespaceName);
@@ -72,7 +72,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan
   /**
    * Constructor
    * @param admin         the administrative API
-   * @param namespaceName the namespace name. n
+   * @param namespaceName the namespace name.
    */
   public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException {
     this.namespaceName = namespaceName;
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
index 3b0e2d1e58b..76a7b32e137 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
@@ -57,7 +57,7 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler {
 
   /**
    * Constructor
-   * @param admin the administrative API n
+   * @param admin the administrative API
    */
   public NamespacesModel(Admin admin) throws IOException {
     NamespaceDescriptor[] nds = admin.listNamespaceDescriptors();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
index 2eba8d4cad8..831c7849abb 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -505,7 +505,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
 
   /**
    * @param s the JSON representation of the filter
-   * @return the filter n
+   * @return the filter
    */
   public static Filter buildFilter(String s) throws Exception {
     FilterModel model =
@@ -516,7 +516,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
 
   /**
    * @param filter the filter
-   * @return the JSON representation of the filter n
+   * @return the JSON representation of the filter
    */
   public static String stringifyFilter(final Filter filter) throws Exception {
     return getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE)
@@ -526,7 +526,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
   private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":");
 
   /**
-   * @param scan the scan specification n
+   * @param scan the scan specification
    */
   public static ScannerModel fromScan(Scan scan) throws Exception {
     ScannerModel model = new ScannerModel();
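
The two helpers are inverses, so a filter survives the JSON round-trip (a sketch; the prefix value is illustrative):

    Filter original = new PrefixFilter(Bytes.toBytes("testrow"));
    String json = ScannerModel.stringifyFilter(original); // Filter -> JSON
    Filter rebuilt = ScannerModel.buildFilter(json);      // JSON -> Filter
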
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
index 37f7f79f89f..6b39daaacd6 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
@@ -58,7 +58,7 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler {
   }
 
   /**
-   * Constructor n
+   * Constructor
    */
   public TableInfoModel(String name) {
     this.name = name;
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
index 51a2bc567cd..32459738002 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
@@ -48,7 +48,7 @@ public class TableModel implements Serializable {
   }
 
   /**
-   * Constructor n
+   * Constructor
    */
   public TableModel(String name) {
     super();
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
index 9e076d433f0..610eb31cfee 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
@@ -52,7 +52,7 @@ public class RemoteAdmin {
   private static volatile Unmarshaller versionClusterUnmarshaller;
 
   /**
-   * Constructor nn
+   * Constructor
    */
   public RemoteAdmin(Client client, Configuration conf) {
     this(client, conf, null);
@@ -69,7 +69,7 @@ public class RemoteAdmin {
   }
 
   /**
-   * Constructor nnn
+   * Constructor
    */
   public RemoteAdmin(Client client, Configuration conf, String accessToken) {
     this.client = client;
@@ -89,8 +89,8 @@ public class RemoteAdmin {
   }
 
   /**
-   * @return string representing the rest api's version n * if the endpoint does not exist, there is
-   *         a timeout, or some other general failure mode
+   * @return string representing the rest api's version
+   * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+   *           failure mode
    */
   public VersionModel getRestVersion() throws IOException {
 
@@ -169,8 +169,8 @@ public class RemoteAdmin {
   }
 
   /**
-   * @return string representing the cluster's version n * if the endpoint does not exist, there is
-   *         a timeout, or some other general failure mode
+   * @return string representing the cluster's version
+   * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+   *         failure mode
    */
   public StorageClusterVersionModel getClusterVersion() throws IOException {
 
@@ -336,8 +336,8 @@ public class RemoteAdmin {
   }
 
   /**
-   * @return string representing the cluster's version n * if the endpoint does not exist, there is
-   *         a timeout, or some other general failure mode
+   * @return the table list
+   * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+   *         failure mode
    */
   public TableListModel getTableList() throws IOException {
 
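
Taken together, the constructors and getters above describe a flow like the following; a minimal
sketch assuming a REST gateway listening on localhost:8080 (the port is an assumption for
illustration):

    Client client = new Client(new Cluster().add("localhost", 8080));
    RemoteAdmin admin = new RemoteAdmin(client, HBaseConfiguration.create());
    VersionModel restVersion = admin.getRestVersion();                      // IOException on failure
    StorageClusterVersionModel clusterVersion = admin.getClusterVersion();
    TableListModel tables = admin.getTableList();
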
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 3a19934d749..de1bb561327 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -574,6 +574,47 @@ public class TestRemoteTable {
     assertTrue(response.hasBody());
   }
 
+  /**
+   * Tests a scanner with a limit set. The limit caps the total number of rows the scanner fetches
+   * over its lifetime, so the number of rows returned should be equal to the limit.
+   */
+  @Test
+  public void testLimitedScan() throws Exception {
+    int numTrials = 100;
+    int limit = 60;
+
+    // Truncate the test table before inserting the test rows
+    TEST_UTIL.getAdmin().disableTable(TABLE);
+    TEST_UTIL.getAdmin().truncateTable(TABLE, false);
+    String row = "testrow";
+
+    try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) {
+      List<Put> puts = new ArrayList<>();
+      Put put = null;
+      for (int i = 1; i <= numTrials; i++) {
+        put = new Put(Bytes.toBytes(row + i));
+        put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, Bytes.toBytes("testvalue" + i));
+        puts.add(put);
+      }
+      table.put(puts);
+    }
+
+    remoteTable =
+      new RemoteHTable(new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())),
+        TEST_UTIL.getConfiguration(), TABLE.toBytes());
+
+    Scan scan = new Scan();
+    scan.setLimit(limit);
+    ResultScanner scanner = remoteTable.getScanner(scan);
+    Iterator<Result> resultIterator = scanner.iterator();
+    int counter = 0;
+    while (resultIterator.hasNext()) {
+      resultIterator.next();
+      counter++;
+    }
+    assertEquals(limit, counter);
+  }
+
   /**
    * Tests keeping a HBase scanner alive for long periods of time. Each call to next() should reset
    * the ConnectionCache timeout for the scanner's connection.
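
One note on the new testLimitedScan above: the scanner it opens is never closed. A hedged variant
of the scan loop, using try-with-resources since ResultScanner is both Closeable and Iterable,
would be:

    Scan scan = new Scan();
    scan.setLimit(limit);
    int counter = 0;
    try (ResultScanner scanner = remoteTable.getScanner(scan)) {
      for (Result result : scanner) {  // stops once the server-side limit is reached
        counter++;
      }
    }
    assertEquals(limit, counter);
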
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
index 44498200991..5eca7afd28b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
@@ -34,7 +34,7 @@ class HealthReport {
   }
 
   /**
-   * Gets the status of the region server. n
+   * Gets the status of the region server.
    */
   HealthCheckerExitStatus getStatus() {
     return status;
@@ -46,7 +46,7 @@ class HealthReport {
   }
 
   /**
-   * Gets the health report of the region server. n
+   * Gets the health report of the region server.
    */
   String getHealthReport() {
     return healthReport;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index a803b6aee9d..816ef997cbd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -66,7 +66,7 @@ public class LocalHBaseCluster {
   private final Class<? extends HRegionServer> regionServerClass;
 
   /**
-   * Constructor. nn
+   * Constructor.
    */
   public LocalHBaseCluster(final Configuration conf) throws IOException {
     this(conf, DEFAULT_NO);
@@ -75,7 +75,7 @@ public class LocalHBaseCluster {
   /**
    * Constructor.
    * @param conf            Configuration to use. Post construction has the master's address.
-   * @param noRegionServers Count of regionservers to start. n
+   * @param noRegionServers Count of regionservers to start.
    */
   public LocalHBaseCluster(final Configuration conf, final int noRegionServers) throws IOException {
     this(conf, 1, 0, noRegionServers, getMasterImplementation(conf),
@@ -86,7 +86,7 @@ public class LocalHBaseCluster {
    * Constructor.
    * @param conf            Configuration to use. Post construction has the active master address.
    * @param noMasters       Count of masters to start.
-   * @param noRegionServers Count of regionservers to start. n
+   * @param noRegionServers Count of regionservers to start.
    */
   public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers)
     throws IOException {
@@ -116,7 +116,7 @@ public class LocalHBaseCluster {
    * Constructor.
    * @param conf            Configuration to use. Post construction has the master's address.
    * @param noMasters       Count of masters to start.
-   * @param noRegionServers Count of regionservers to start. nnn
+   * @param noRegionServers Count of regionservers to start.
    */
   @SuppressWarnings("unchecked")
   public LocalHBaseCluster(final Configuration conf, final int noMasters,
@@ -240,9 +240,7 @@ public class LocalHBaseCluster {
     });
   }
 
-  /**
-   * n * @return region server
-   */
+  /** Returns the region server */
   public HRegionServer getRegionServer(int serverNumber) {
     return regionThreads.get(serverNumber).getRegionServer();
   }
@@ -427,7 +425,7 @@ public class LocalHBaseCluster {
   }
 
   /**
-   * Test things basically work. nn
+   * Test things basically work.
    */
   public static void main(String[] args) throws IOException {
     Configuration conf = HBaseConfiguration.create();
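
For reference, the constructors documented above are typically driven like this; a minimal
sketch, assuming the default local ports are free:

    Configuration conf = HBaseConfiguration.create();
    LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1);  // one master, one regionserver
    cluster.startup();
    // ... interact with the cluster ...
    cluster.shutdown();  // stop the master and regionserver threads
    cluster.join();      // wait for them to exit
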
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
index 47f6938652d..62da616acb5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
@@ -30,7 +30,7 @@ public interface RegionStateListener {
   // state than introduce a whole new listening mechanism? St.Ack
   /**
    * Process region split event.
-   * @param hri An instance of RegionInfo n
+   * @param hri An instance of RegionInfo
    */
   void onRegionSplit(RegionInfo hri) throws IOException;
 
@@ -42,7 +42,7 @@ public interface RegionStateListener {
   void onRegionSplitReverted(RegionInfo hri) throws IOException;
 
   /**
-   * Process region merge event. n
+   * Process region merge event.
    */
   void onRegionMerged(RegionInfo mergedRegion) throws IOException;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
index 280ad3b7c47..d9bec2e3d81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
@@ -146,8 +146,8 @@ public class SplitLogTask {
 
   /**
    * @param data Serialized date to parse.
-   * @return An SplitLogTaskState instance made of the passed <code>data</code> n * @see
-   *         #toByteArray()
+   * @return A SplitLogTask instance made of the passed <code>data</code>
+   * @see #toByteArray()
    */
   public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException {
     ProtobufUtil.expectPBMagicPrefix(data);
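
The toByteArray/parseFrom pair round-trips a task through a pb-magic-prefixed protobuf. A sketch,
where Unassigned is one of the SplitLogTask state subclasses and serverName is assumed in scope:

    SplitLogTask task = new SplitLogTask.Unassigned(serverName);
    byte[] data = task.toByteArray();                    // pb magic prefix + serialized state
    SplitLogTask parsed = SplitLogTask.parseFrom(data);  // DeserializationException on bad bytes
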
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index 68dc87502e0..8615efe6a7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -692,12 +692,12 @@ public class HFileArchiver {
 
     /**
      * @return if this is a directory, returns all the children in the directory, otherwise returns
-     *         an empty list n
+     *         an empty list
      */
     abstract Collection<File> getChildren() throws IOException;
 
     /**
-     * close any outside readers of the file n
+     * Close any outside readers of the file.
      */
     abstract void close() throws IOException;
 
@@ -708,7 +708,8 @@ public class HFileArchiver {
     abstract Path getPath();
 
     /**
-     * Move the file to the given destination n * @return <tt>true</tt> on success n
+     * Move the file to the given destination
+     * @return <tt>true</tt> on success
      */
     public boolean moveAndClose(Path dest) throws IOException {
       this.close();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
index e9703cb8cb2..f9ef4326f7f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
@@ -71,8 +71,7 @@ public final class VersionInfoUtil {
   }
 
   /**
-   * n * @return the passed-in <code>version</code> int as a version String (e.g. 0x0103004 is
-   * 1.3.4)
+   * Returns the passed-in <code>version</code> int as a version String (e.g. 0x0103004 is 1.3.4)
    */
   public static String versionNumberToString(final int version) {
     return String.format("%d.%d.%d", ((version >> 20) & 0xff), ((version >> 12) & 0xff),
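
As the cleaned-up javadoc notes, the int packs the major/minor/patch fields into separate bit
ranges; for example:

    // 1 << 20 | 3 << 12 | 4 == 0x103004, which formats as "1.3.4"
    String v = VersionInfoUtil.versionNumberToString(0x103004);
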
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
index 25182fd457b..47c1e4d6dcb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
@@ -74,8 +74,8 @@ public final class Constraints {
    * Enable constraints on a table.
    * <p>
    * Currently, if you attempt to add a constraint to the table, then Constraints will automatically
-   * be turned on. n * table description to add the processor n * If the {@link ConstraintProcessor}
-   * CP couldn't be added to the table.
+   * be turned on.
+   * @param desc table description to add the processor to
+   * @throws IOException If the {@link ConstraintProcessor} CP couldn't be added to the table.
    */
   public static void enable(HTableDescriptor desc) throws IOException {
     // if the CP has already been loaded, do nothing
@@ -90,7 +90,7 @@ public final class Constraints {
 
   /**
    * Turn off processing constraints for a given table, even if constraints have been turned on or
-   * added. n * {@link HTableDescriptor} where to disable {@link Constraint Constraints}.
+   * added.
+   * @param desc {@link HTableDescriptor} where to disable {@link Constraint Constraints}.
    */
   public static void disable(HTableDescriptor desc) {
     desc.removeCoprocessor(ConstraintProcessor.class.getName());
@@ -100,8 +100,8 @@ public final class Constraints {
    * Remove all {@link Constraint Constraints} that have been added to the table and turn off the
    * constraint processing.
    * <p>
-   * All {@link Configuration Configurations} and their associated {@link Constraint} are removed. n
-   * * {@link HTableDescriptor} to remove {@link Constraint Constraints} from.
+   * All {@link Configuration Configurations} and their associated {@link Constraint} are removed.
+   * @param desc {@link HTableDescriptor} to remove {@link Constraint Constraints} from.
    */
   public static void remove(HTableDescriptor desc) {
     // disable constraints
@@ -124,7 +124,7 @@ public final class Constraints {
   }
 
   /**
-   * Check to see if the Constraint is currently set. n * {@link HTableDescriptor} to check n *
-   * {@link Constraint} class to check for.
+   * Check to see if the Constraint is currently set.
+   * @param desc  {@link HTableDescriptor} to check
+   * @param clazz {@link Constraint} class to check for.
    * @return <tt>true</tt> if the {@link Constraint} is present, even if it is disabled.
    *         <tt>false</tt> otherwise.
@@ -158,10 +158,9 @@ public final class Constraints {
    * Each constraint, when added to the table, will have a specific priority, dictating the order in
    * which the {@link Constraint} will be run. A {@link Constraint} earlier in the list will be run
    * before those later in the list. The same logic applies between two Constraints over time
-   * (earlier added is run first on the regionserver). n * {@link HTableDescriptor} to add
-   * {@link Constraint Constraints} n * {@link Constraint Constraints} to add. All constraints are
-   * considered automatically enabled on add n * If constraint could not be serialized/added to
-   * table
+   * (earlier added is run first on the regionserver).
+   * @param desc        {@link HTableDescriptor} to add {@link Constraint Constraints} to
+   * @param constraints {@link Constraint Constraints} to add. All constraints are considered
+   *                    automatically enabled on add.
+   * @throws IOException If constraint could not be serialized/added to table
    */
   public static void add(HTableDescriptor desc, Class<? extends Constraint>... constraints)
     throws IOException {
@@ -184,10 +183,10 @@ public final class Constraints {
    * Each constraint, when added to the table, will have a specific priority, dictating the order in
    * which the {@link Constraint} will be run. A {@link Constraint} earlier in the list will be run
    * before those later in the list. The same logic applies between two Constraints over time
-   * (earlier added is run first on the regionserver). n * {@link HTableDescriptor} to add a
-   * {@link Constraint} n * {@link Pair} of a {@link Constraint} and its associated
-   * {@link Configuration}. The Constraint will be configured on load with the specified
-   * configuration.All constraints are considered automatically enabled on add n * if any constraint
-   * could not be deserialized. Assumes if 1 constraint is not loaded properly, something has gone
-   * terribly wrong and that all constraints need to be enforced.
+   * (earlier added is run first on the regionserver).
+   * @param desc        {@link HTableDescriptor} to add a {@link Constraint} to
+   * @param constraints {@link Pair Pairs} of a {@link Constraint} and its associated
+   *                    {@link Configuration}. The Constraint will be configured on load with the
+   *                    specified configuration. All constraints are considered automatically
+   *                    enabled on add.
+   * @throws IOException if any constraint could not be deserialized. Assumes if 1 constraint is
+   *                     not loaded properly, something has gone terribly wrong and that all
+   *                     constraints need to be enforced.
    */
@@ -206,10 +205,10 @@ public final class Constraints {
    * <p>
    * Each constraint, when added to the table, will have a specific priority, dictating the order in
    * which the {@link Constraint} will be run. A {@link Constraint} added will run on the
-   * regionserver before those added to the {@link HTableDescriptor} later. n * table descriptor to
-   * the constraint to n * to be added n * configuration associated with the constraint n * if any
-   * constraint could not be deserialized. Assumes if 1 constraint is not loaded properly, something
-   * has gone terribly wrong and that all constraints need to be enforced.
+   * regionserver before those added to the {@link HTableDescriptor} later.
+   * @param desc       table descriptor to add the constraint to
+   * @param constraint constraint to be added
+   * @param conf       configuration associated with the constraint
+   * @throws IOException if any constraint could not be deserialized. Assumes if 1 constraint is
+   *                     not loaded properly, something has gone terribly wrong and that all
+   *                     constraints need to be enforced.
    */
   public static void add(HTableDescriptor desc, Class<? extends Constraint> constraint,
     Configuration conf) throws IOException {
@@ -234,9 +233,8 @@ public final class Constraints {
   }
 
   /**
-   * Setup the configuration for a constraint as to whether it is enabled and its priority n * on
-   * which to base the new configuration n * <tt>true</tt> if it should be run n * relative to other
-   * constraints
+   * Setup the configuration for a constraint as to whether it is enabled and its priority.
+   * @param conf     configuration on which to base the new configuration
+   * @param enabled  <tt>true</tt> if it should be run
+   * @param priority priority relative to other constraints
    * @return a new configuration, storable in the {@link HTableDescriptor}
    */
   private static Configuration configure(Configuration conf, boolean enabled, long priority) {
@@ -257,7 +255,7 @@ public final class Constraints {
 
   /**
    * Just write the class to a String representation of the class as a key for the
-   * {@link HTableDescriptor} n * Constraint class to convert to a {@link HTableDescriptor} key
+   * {@link HTableDescriptor}.
+   * @param clazz Constraint class to convert to a {@link HTableDescriptor} key
    * @return key to store in the {@link HTableDescriptor}
    */
   private static String serializeConstraintClass(Class<? extends Constraint> clazz) {
@@ -275,8 +273,8 @@ public final class Constraints {
   }
 
   /**
-   * Write the configuration to a String n * to write
-   * @return String representation of that configuration n
+   * Write the configuration to a String.
+   * @param conf configuration to write
+   * @return String representation of that configuration
    */
   private static String serializeConfiguration(Configuration conf) throws IOException {
     // write the configuration out to the data stream
@@ -289,7 +287,7 @@ public final class Constraints {
   }
 
   /**
-   * Read the {@link Configuration} stored in the byte stream. n * to read from
+   * Read the {@link Configuration} stored in the byte stream.
+   * @param bytes to read from
    * @return A valid configuration
    */
   private static Configuration readConfiguration(byte[] bytes) throws IOException {
@@ -300,8 +298,8 @@ public final class Constraints {
   }
 
   /**
-   * Read in the configuration from the String encoded configuration n * to read from
-   * @return A valid configuration n * if the configuration could not be read
+   * Read in the configuration from the String encoded configuration.
+   * @param bytes String encoded configuration to read from
+   * @return A valid configuration
+   * @throws IOException if the configuration could not be read
    */
   private static Configuration readConfiguration(String bytes) throws IOException {
     return readConfiguration(Bytes.toBytes(bytes));
@@ -328,9 +326,9 @@ public final class Constraints {
 
   /**
    * Update the configuration for the {@link Constraint}; does not change the order in which the
-   * constraint is run. n * {@link HTableDescriptor} to update n * {@link Constraint} to update n *
-   * to update the {@link Constraint} with. n * if the Constraint was not stored correctly n * if
-   * the Constraint was not present on this table.
+   * constraint is run.
+   * @param desc          {@link HTableDescriptor} to update
+   * @param clazz         {@link Constraint} to update
+   * @param configuration to update the {@link Constraint} with
+   * @throws IOException              if the Constraint was not stored correctly
+   * @throws IllegalArgumentException if the Constraint was not present on this table
    */
   public static void setConfiguration(HTableDescriptor desc, Class<? extends Constraint> clazz,
     Configuration configuration) throws IOException, IllegalArgumentException {
@@ -357,8 +355,8 @@ public final class Constraints {
   }
 
   /**
-   * Remove the constraint (and associated information) for the table descriptor. n *
-   * {@link HTableDescriptor} to modify n * {@link Constraint} class to remove
+   * Remove the constraint (and associated information) for the table descriptor.
+   * @param desc  {@link HTableDescriptor} to modify
+   * @param clazz {@link Constraint} class to remove
    */
   public static void remove(HTableDescriptor desc, Class<? extends Constraint> clazz) {
     String key = serializeConstraintClass(clazz);
@@ -367,9 +365,8 @@ public final class Constraints {
 
   /**
    * Enable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the
-   * {@link Constraint}, but makes sure that it gets loaded on the table. n *
-   * {@link HTableDescriptor} to modify n * {@link Constraint} to enable n * If the constraint
-   * cannot be properly deserialized
+   * {@link Constraint}, but makes sure that it gets loaded on the table.
+   * @param desc  {@link HTableDescriptor} to modify
+   * @param clazz {@link Constraint} to enable
+   * @throws IOException If the constraint cannot be properly deserialized
    */
   public static void enableConstraint(HTableDescriptor desc, Class<? extends Constraint> clazz)
     throws IOException {
@@ -378,9 +375,9 @@ public final class Constraints {
 
   /**
    * Disable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the
-   * {@link Constraint}, but it just doesn't load the {@link Constraint} on the table. n *
-   * {@link HTableDescriptor} to modify n * {@link Constraint} to disable. n * if the constraint
-   * cannot be found
+   * {@link Constraint}, but it just doesn't load the {@link Constraint} on the table.
+   * @param desc  {@link HTableDescriptor} to modify
+   * @param clazz {@link Constraint} to disable.
+   * @throws IOException if the constraint cannot be found
    */
   public static void disableConstraint(HTableDescriptor desc, Class<? extends Constraint> clazz)
     throws IOException {
@@ -410,10 +407,10 @@ public final class Constraints {
   }
 
   /**
-   * Check to see if the given constraint is enabled. n * {@link HTableDescriptor} to check. n *
-   * {@link Constraint} to check for
-   * @return <tt>true</tt> if the {@link Constraint} is present and enabled. <tt>false</tt>
-   *         otherwise. n * If the constraint has improperly stored in the table
+   * Check to see if the given constraint is enabled.
+   * @param desc  {@link HTableDescriptor} to check
+   * @param clazz {@link Constraint} to check for
+   * @return <tt>true</tt> if the {@link Constraint} is present and enabled. <tt>false</tt>
+   *         otherwise.
+   * @throws IOException If the constraint has been improperly stored in the table
    */
   public static boolean enabled(HTableDescriptor desc, Class<? extends Constraint> clazz)
     throws IOException {
@@ -431,11 +428,11 @@ public final class Constraints {
   }
 
   /**
-   * Get the constraints stored in the table descriptor n * To read from n * To use when loading
-   * classes. If a special classloader is used on a region, for instance, then that should be the
-   * classloader used to load the constraints. This could also apply to unit-testing situation,
-   * where want to ensure that class is reloaded or not.
-   * @return List of configured {@link Constraint Constraints} n * if any part of reading/arguments
-   *         fails
+   * Get the constraints stored in the table descriptor.
+   * @param desc        To read from
+   * @param classloader To use when loading classes. If a special classloader is used on a region,
+   *                    for instance, then that should be the classloader used to load the
+   *                    constraints. This could also apply to a unit-testing situation, where one
+   *                    wants to ensure that a class is reloaded or not.
+   * @return List of configured {@link Constraint Constraints}
+   * @throws IOException if any part of reading/arguments fails
    */
   static List<? extends Constraint> getConstraints(TableDescriptor desc, ClassLoader classloader)
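
Putting the Constraints API documented above together; a hedged sketch in which MyConstraint is a
hypothetical user-supplied Constraint implementation:

    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
    Constraints.add(desc, MyConstraint.class);                // also turns constraint processing on
    Constraints.disableConstraint(desc, MyConstraint.class);  // keep it registered but not loaded
    boolean present = Constraints.has(desc, MyConstraint.class);
    Constraints.remove(desc, MyConstraint.class);
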
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
index 29aa273b2b3..a7f813aeea0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
@@ -131,7 +131,7 @@ public interface SplitLogManagerCoordination {
   void deleteTask(String taskName);
 
   /**
-   * Support method to init constants such as timeout. Mostly required for UTs. n
+   * Support method to init constants such as timeout. Mostly required for UTs.
    */
   void init() throws IOException;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index d08a7238e81..f5faabc7e2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -139,7 +139,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener
    * It is possible for a task to stay in UNASSIGNED state indefinitely - say SplitLogManager wants
    * to resubmit a task. It forces the task to UNASSIGNED state but it dies before it could create
    * the RESCAN task node to signal the SplitLogWorkers to pick up the task. To prevent this
-   * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup. n
+   * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup.
    */
   private void handleUnassignedTask(String path) {
     if (ZKSplitLog.isRescanNode(watcher, path)) {
@@ -551,7 +551,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener
      * partially done tasks are present. taskname is the name of the task that was put up in
      * zookeeper.
      * <p>
-     * nn * @return DONE if task completed successfully, ERR otherwise
+     * @return DONE if task completed successfully, ERR otherwise
      */
     Status finish(ServerName workerName, String taskname);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index 7acb0891dbc..6def70f9714 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -374,7 +374,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLog
    * in a cluster.
    * <p>
    * Synchronization using <code>taskReadySeq</code> ensures that it will try to grab every task
-   * that has been put up n
+   * that has been put up
    */
   @Override
   public void taskLoop() throws InterruptedException {
@@ -534,7 +534,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLog
    */
   /**
    * endTask() can fail and the only way to recover out of it is for the
-   * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. nn
+   * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
    */
   @Override
   public void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails details) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index cc8977f4581..c1ba9e274ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -655,7 +655,7 @@ public abstract class CoprocessorHost<C extends Coprocessor, E extends Coprocess
    * may remain shutdown if any exception occurs during next coprocessor execution which prevent
    * master/regionserver stop or cluster shutdown. (Refer:
    * <a href="https://issues.apache.org/jira/browse/HBASE-16663">HBASE-16663</a>
-   * @return true if bypaas coprocessor execution, false if not. n
+   * @return true if the coprocessor execution was bypassed, false if not.
    */
   protected <O> boolean execShutdown(final ObserverOperation<O> observerOperation)
     throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 0e73808a8ca..a49f5764843 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -547,8 +547,8 @@ public interface MasterObserver {
 
   /**
    * Called prior to unassigning a given region.
-   * @param ctx the environment to interact with the framework and master n * @param force whether
-   *            to force unassignment or not
+   * @param ctx   the environment to interact with the framework and master
+   * @param force whether to force unassignment or not
    * @deprecated in 2.4.0. replaced by preUnassign(ctx, regionInfo). removed in hbase 3. until then
    *             safe to either leave implementation here or move it to the new method. default impl
    *             of that method calls this one.
@@ -559,7 +559,7 @@ public interface MasterObserver {
 
   /**
    * Called prior to unassigning a given region.
-   * @param ctx the environment to interact with the framework and master n
+   * @param ctx the environment to interact with the framework and master
    */
   default void preUnassign(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     final RegionInfo regionInfo) throws IOException {
@@ -568,8 +568,8 @@ public interface MasterObserver {
 
   /**
    * Called after the region unassignment has been requested.
-   * @param ctx the environment to interact with the framework and master n * @param force whether
-   *            to force unassignment or not
+   * @param ctx   the environment to interact with the framework and master
+   * @param force whether to force unassignment or not
    * @deprecated in 2.4.0. replaced by postUnassign(ctx, regionInfo). removed in hbase 3. until then
    *             safe to either leave implementation here or move it to the new method. default impl
    *             of that method calls this one.
@@ -580,7 +580,7 @@ public interface MasterObserver {
 
   /**
    * Called after the region unassignment has been requested.
-   * @param ctx the environment to interact with the framework and master n
+   * @param ctx the environment to interact with the framework and master
    */
   default void postUnassign(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     final RegionInfo regionInfo) throws IOException {
@@ -589,7 +589,7 @@ public interface MasterObserver {
 
   /**
    * Called prior to marking a given region as offline.
-   * @param ctx the environment to interact with the framework and master n
+   * @param ctx the environment to interact with the framework and master
    */
   default void preRegionOffline(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     final RegionInfo regionInfo) throws IOException {
@@ -597,7 +597,7 @@ public interface MasterObserver {
 
   /**
    * Called after the region has been marked offline.
-   * @param ctx the environment to interact with the framework and master n
+   * @param ctx the environment to interact with the framework and master
    */
   default void postRegionOffline(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     final RegionInfo regionInfo) throws IOException {
@@ -677,7 +677,7 @@ public interface MasterObserver {
 
   /**
    * This will be called before update META step as part of split transaction.
-   * @param ctx the environment to interact with the framework and master nn
+   * @param ctx the environment to interact with the framework and master
    */
   default void preSplitRegionBeforeMETAAction(
     final ObserverContext<MasterCoprocessorEnvironment> ctx, final byte[] splitKey,
@@ -1421,67 +1421,72 @@ public interface MasterObserver {
   }
 
   /**
-   * Called before remove a replication peer n * @param peerId a short name that identifies the peer
+   * Called before removing a replication peer
+   * @param peerId a short name that identifies the peer
    */
   default void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     String peerId) throws IOException {
   }
 
   /**
-   * Called after remove a replication peer n * @param peerId a short name that identifies the peer
+   * Called after removing a replication peer
+   * @param peerId a short name that identifies the peer
    */
   default void postRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     String peerId) throws IOException {
   }
 
   /**
-   * Called before enable a replication peer n * @param peerId a short name that identifies the peer
+   * Called before enabling a replication peer
+   * @param peerId a short name that identifies the peer
    */
   default void preEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     String peerId) throws IOException {
   }
 
   /**
-   * Called after enable a replication peer n * @param peerId a short name that identifies the peer
+   * Called after enabling a replication peer
+   * @param peerId a short name that identifies the peer
    */
   default void postEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     String peerId) throws IOException {
   }
 
   /**
-   * Called before disable a replication peer n * @param peerId a short name that identifies the
-   * peer
+   * Called before disabling a replication peer
+   * @param peerId a short name that identifies the peer
    */
   default void preDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     String peerId) throws IOException {
   }
 
   /**
-   * Called after disable a replication peer n * @param peerId a short name that identifies the peer
+   * Called after disabling a replication peer
+   * @param peerId a short name that identifies the peer
    */
   default void postDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     String peerId) throws IOException {
   }
 
   /**
-   * Called before get the configured ReplicationPeerConfig for the specified peer n * @param peerId
-   * a short name that identifies the peer
+   * Called before getting the configured ReplicationPeerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
    */
   default void preGetReplicationPeerConfig(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     String peerId) throws IOException {
   }
 
   /**
-   * Called after get the configured ReplicationPeerConfig for the specified peer n * @param peerId
-   * a short name that identifies the peer
+   * Called after getting the configured ReplicationPeerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
    */
   default void postGetReplicationPeerConfig(final ObserverContext<MasterCoprocessorEnvironment> ctx,
     String peerId) throws IOException {
   }
 
   /**
-   * Called before update peerConfig for the specified peer n * @param peerId a short name that
-   * identifies the peer
+   * Called before updating peerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
    */
   default void preUpdateReplicationPeerConfig(
     final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
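
A sketch of how the replication-peer hooks above are consumed: a coprocessor implements
MasterCoprocessor plus MasterObserver and overrides only the hooks it needs (class name and
logging are illustrative; imports elided):

    public class PeerAuditObserver implements MasterCoprocessor, MasterObserver {
      private static final Logger LOG = LoggerFactory.getLogger(PeerAuditObserver.class);

      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public void preRemoveReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
        String peerId) throws IOException {
        LOG.info("about to remove replication peer {}", peerId);  // audit before removal
      }
    }
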
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index be382d3a55c..5f66c03f1c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -570,15 +570,15 @@ public interface RegionObserver {
 
   /**
    * This will be called for region operations where read lock is acquired in
-   * {@link Region#startRegionOperation()}. n * @param operation The operation is about to be taken
-   * on the region
+   * {@link Region#startRegionOperation()}.
+   * @param operation The operation about to be taken on the region
    */
   default void postStartRegionOperation(ObserverContext<RegionCoprocessorEnvironment> ctx,
     Operation operation) throws IOException {
   }
 
   /**
-   * Called after releasing read lock in {@link Region#closeRegionOperation()}. nn
+   * Called after releasing read lock in {@link Region#closeRegionOperation()}.
    */
   default void postCloseRegionOperation(ObserverContext<RegionCoprocessorEnvironment> ctx,
     Operation operation) throws IOException {
@@ -589,8 +589,8 @@ public interface RegionObserver {
    * batch operation fails.
    * <p>
    * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If
-   * need a Cell reference for later use, copy the cell and use that. nn * @param success true if
-   * batch operation is successful otherwise false.
+   * a Cell reference is needed for later use, copy the cell and use that.
+   * @param success true if the batch operation is successful, otherwise false.
    */
   default void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> ctx,
     MiniBatchOperationInProgress<Mutation> miniBatchOp, boolean success) throws IOException {
@@ -1463,8 +1463,8 @@ public interface RegionObserver {
    * @param fs     fileystem to read from
    * @param p      path to the file
    * @param in     {@link FSDataInputStreamWrapper}
-   * @param size   Full size of the file n * @param r original reference file. This will be not null
-   *               only when reading a split file.
+   * @param size   Full size of the file
+   * @param r      original reference file. This will be not null only when reading a split file.
    * @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain
    * @return a Reader instance to use instead of the base reader if overriding default behavior,
    *         null otherwise
@@ -1485,8 +1485,8 @@ public interface RegionObserver {
    * @param fs     fileystem to read from
    * @param p      path to the file
    * @param in     {@link FSDataInputStreamWrapper}
-   * @param size   Full size of the file n * @param r original reference file. This will be not null
-   *               only when reading a split file.
+   * @param size   Full size of the file
+   * @param r      original reference file. This will be not null only when reading a split file.
    * @param reader the base reader instance
    * @return The reader to use
    * @deprecated For Phoenix only, StoreFileReader is not a stable interface.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
index 19fa8adc1e3..8c02b346f3c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
@@ -49,7 +49,7 @@ public class ForeignException extends IOException {
 
   /**
    * Create a new ForeignException that can be serialized. It is assumed that this came form a local
-   * source. nn
+   * source.
    */
   public ForeignException(String source, Throwable cause) {
     super(cause);
@@ -60,7 +60,7 @@ public class ForeignException extends IOException {
 
   /**
    * Create a new ForeignException that can be serialized. It is assumed that this is locally
-   * generated. nn
+   * generated.
    */
   public ForeignException(String source, String msg) {
     super(new IllegalArgumentException(msg));
@@ -146,8 +146,8 @@ public class ForeignException extends IOException {
   }
 
   /**
-   * Takes a series of bytes and tries to generate an ForeignException instance for it. n * @return
-   * the ForeignExcpetion instance
+   * Takes a series of bytes and tries to generate a ForeignException instance for it.
+   * @return the ForeignException instance
    * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
    */
   public static ForeignException deserialize(byte[] bytes) throws IOException {
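
deserialize is the inverse of this class's serialize(String, Throwable) helper (assumed from the
class's API); a sketch of the round trip:

    byte[] wire = ForeignException.serialize("node-1", new IllegalStateException("boom"));
    ForeignException restored = ForeignException.deserialize(wire);  // IOException on bad bytes
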
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
index 3718900cc87..09fb78468dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
@@ -40,7 +40,7 @@ public interface ForeignExceptionSnare {
 
   /**
    * Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is no
-   * exception this is a no-op n * all exceptions from remote sources are procedure exceptions
+   * exception this is a no-op.
+   * @throws ForeignException all exceptions from remote sources are procedure exceptions
    */
   void rethrowException() throws ForeignException;
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
index 94418f0c381..ece244fda4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
@@ -106,7 +106,7 @@ public abstract class EventHandler implements Runnable, Comparable<EventHandler>
   }
 
   /**
-   * This method is the main processing loop to be implemented by the various subclasses. n
+   * This method is the main processing loop to be implemented by the various subclasses.
    */
   public abstract void process() throws IOException;
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
index a60a1ed7512..5ddd9316cd3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
@@ -259,7 +259,7 @@ public class ExecutorService {
     }
 
     /**
-     * Submit the event to the queue for handling. n
+     * Submit the event to the queue for handling.
      */
     void submit(final EventHandler event) {
       // If there is a listener for this type, make sure we call the before
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index f98c1da3782..46ff38b9f66 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -182,9 +182,7 @@ public class FavoredNodeAssignmentHelper {
     return servers;
   }
 
-  /**
-   * n * @return PB'ed bytes of {@link FavoredNodes} generated by the server list.
-   */
+  /** Returns PB'ed bytes of {@link FavoredNodes} generated by the server list. */
   public static byte[] getFavoredNodes(List<ServerName> serverAddrList) {
     FavoredNodes.Builder f = FavoredNodes.newBuilder();
     for (ServerName s : serverAddrList) {
@@ -317,8 +315,8 @@ public class FavoredNodeAssignmentHelper {
 
   /**
    * For regions that share the primary, avoid placing the secondary and tertiary on a same RS. Used
-   * for generating new assignments for the primary/secondary/tertiary RegionServers n * @return the
-   * map of regions to the servers the region-files should be hosted on
+   * for generating new assignments for the primary/secondary/tertiary RegionServers
+   * @return the map of regions to the servers the region-files should be hosted on
    */
   public Map<RegionInfo, ServerName[]>
     placeSecondaryAndTertiaryWithRestrictions(Map<RegionInfo, ServerName> primaryRSMap) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
index 0f0304ab9db..13e5c6c2a41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
@@ -78,7 +78,7 @@ public class FavoredNodesPlan {
 
   /**
    * Return the position of the server in the favoredNodes list. Assumes the favoredNodes list is of
-   * size 3. n
+   * size 3.
    */
   public static Position getFavoredServerPosition(List<ServerName> favoredNodes,
     ServerName server) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
index ed3986f5883..337fde60cf7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
@@ -67,23 +67,19 @@ public class Reference {
     bottom
   }
 
-  /**
-   * n * @return A {@link Reference} that points at top half of a an hfile
-   */
+  /** Returns a {@link Reference} that points at the top half of an hfile */
   public static Reference createTopReference(final byte[] splitRow) {
     return new Reference(splitRow, Range.top);
   }
 
-  /**
-   * n * @return A {@link Reference} that points at the bottom half of a an hfile
-   */
+  /** Returns a {@link Reference} that points at the bottom half of an hfile */
   public static Reference createBottomReference(final byte[] splitRow) {
     return new Reference(splitRow, Range.bottom);
   }
 
   /**
    * Constructor
-   * @param splitRow This is row we are splitting around. n
+   * @param splitRow This is the row we are splitting around.
    */
   Reference(final byte[] splitRow, final Range fr) {
     this.splitkey = splitRow == null ? null : KeyValueUtil.createFirstOnRow(splitRow).getKey();
@@ -102,15 +98,13 @@ public class Reference {
   }
 
-  /**
-   * n
-   */
+  /** Returns the file region, {@link Range#top} or {@link Range#bottom} */
   public Range getFileRegion() {
     return this.region;
   }
 
-  /**
-   * n
-   */
+  /** Returns the split key */
   public byte[] getSplitKey() {
     return splitkey;
   }
@@ -151,7 +145,8 @@ public class Reference {
   }
 
   /**
-   * Read a Reference from FileSystem. nn * @return New Reference made from passed <code>p</code> n
+   * Read a Reference from FileSystem.
+   * @return New Reference made from passed <code>p</code>
    */
   public static Reference read(final FileSystem fs, final Path p) throws IOException {
     InputStream in = fs.open(p);
@@ -198,7 +193,7 @@ public class Reference {
   /**
    * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the
    * delimiter, pb reads to EOF which may not be what you want).
-   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n
+   * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
    */
   byte[] toByteArray() throws IOException {
     return ProtobufUtil.prependPBMagic(convert().toByteArray());
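
A sketch of the Reference lifecycle these methods support: create a half reference around a split
row, persist it, and read it back (fs and path are assumed in scope; write(FileSystem, Path) is
assumed from this class's API):

    Reference top = Reference.createTopReference(Bytes.toBytes("splitrow"));
    top.write(fs, path);                            // persists the pb-serialized form
    Reference restored = Reference.read(fs, path);
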
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 1caa1b76f5f..6c05402f5a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -80,7 +80,7 @@ public interface BlockCache extends Iterable<CachedBlock> {
   int evictBlocksByHfileName(String hfileName);
 
   /**
-   * Get the statistics for this block cache. n
+   * Get the statistics for this block cache.
    */
   CacheStats getStats();
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index daa49d26a23..e6a4b609bc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -74,9 +74,7 @@ public class BlockCacheUtil {
       }
     }).setPrettyPrinting().create();
 
-  /**
-   * n * @return The block content as String.
-   */
+  /** Returns the block content as String. */
   public static String toString(final CachedBlock cb, final long now) {
     return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now);
   }
@@ -142,9 +140,7 @@ public class BlockCacheUtil {
     return GSON.toJson(bc);
   }
 
-  /**
-   * n * @return The block content of <code>bc</code> as a String minus the filename.
-   */
+  /** Returns the block content of <code>bc</code> as a String minus the filename. */
   public static String toStringMinusFileName(final CachedBlock cb, final long now) {
     return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age="
       + (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority="
@@ -281,9 +277,7 @@ public class BlockCacheUtil {
       new ConcurrentSkipListMap<>();
     FastLongHistogram hist = new FastLongHistogram();
 
-    /**
-     * n * @return True if full.... if we won't be adding any more.
-     */
+    /** Returns true if full, i.e. if we won't be adding any more. */
     public boolean update(final CachedBlock cb) {
       if (isFull()) return true;
       NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
index a90e04fe5ad..1b2fdc64197 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
@@ -52,7 +52,7 @@ public interface BlockCompressedSizePredicator {
   /**
    * Decides if the block should be finished based on the comparison of its uncompressed size
    * against an adjusted size based on a predicated compression factor.
-   * @param uncompressed true if the block should be finished. n
+   * @param uncompressed the uncompressed size of the block
+   * @return true if the block should be finished
    */
   boolean shouldFinishBlock(int uncompressed);
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
index 2fe50381b77..4e5dfe34df6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
@@ -31,7 +31,7 @@ public interface CacheableDeserializer<T extends Cacheable> {
   /**
    * @param b         ByteBuff to deserialize the Cacheable.
    * @param allocator to manage NIO ByteBuffers for future allocation or de-allocation.
-   * @return T the deserialized object. n
+   * @return T the deserialized object.
    */
   T deserialize(ByteBuff b, ByteBuffAllocator allocator) throws IOException;
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
index ab7ac822a98..97f418fd3f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
@@ -82,9 +82,9 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
   private BloomType bloomType;
 
   /**
-   * n * each chunk's size in bytes. The real chunk size might be different as required by the fold
-   * factor. n * target false positive rate n * hash function type to use n * maximum degree of
-   * folding allowed n * the bloom type
+   * @param chunkByteSizeHint each chunk's size in bytes. The real chunk size might be different as
+   *                          required by the fold factor.
+   * @param errorRate         target false positive rate
+   * @param hashType          hash function type to use
+   * @param maxFold           maximum degree of folding allowed
+   * @param bloomType         the bloom type
    */
   public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType,
     int maxFold, boolean cacheOnWrite, CellComparator comparator, BloomType bloomType) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 6e72890be12..b5a5095c336 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -138,7 +138,7 @@ public class HFileBlockIndex {
     }
 
     /**
-     * n * from 0 to {@link #getRootBlockCount() - 1}
+     * @param i from 0 to {@link #getRootBlockCount() - 1}
      */
     public byte[] getRootBlockKey(int i) {
       return blockKeys[i];
@@ -256,7 +256,7 @@ public class HFileBlockIndex {
     }
 
     /**
-     * n * from 0 to {@link #getRootBlockCount() - 1}
+     * @param i from 0 to {@link #getRootBlockCount() - 1}
      */
     public Cell getRootBlockKey(int i) {
       return blockKeys[i];
@@ -521,7 +521,7 @@ public class HFileBlockIndex {
     }
 
     /**
-     * n * from 0 to {@link #getRootBlockCount() - 1}
+     * @param i from 0 to {@link #getRootBlockCount() - 1}
      */
     public Cell getRootBlockKey(int i) {
       return seeker.getRootBlockKey(i);
@@ -600,12 +600,12 @@ public class HFileBlockIndex {
     /**
      * Return the data block which contains this key. This function will only be called when the
      * HFile version is larger than 1.
-     * @param key          the key we are looking for
-     * @param currentBlock the current block, to avoid re-reading the same block nnn * @param
-     *                     expectedDataBlockEncoding the data block encoding the caller is expecting
-     *                     the data block to be in, or null to not perform this check and return the
-     *                     block irrespective of the encoding
-     * @return reader a basic way to load blocks n
+     * @param key                       the key we are looking for
+     * @param currentBlock              the current block, to avoid re-reading the same block
+     * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data
+     *                                  block to be in, or null to not perform this check and return
+     *                                  the block irrespective of the encoding
+     * @return the data block which contains this key
      */
     public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks,
       boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding,
@@ -629,7 +629,7 @@ public class HFileBlockIndex {
      *                                  block to be in, or null to not perform this check and return
      *                                  the block irrespective of the encoding.
      * @return the BlockWithScanInfo which contains the DataBlock with other scan info such as
-     *         nextIndexedKey. n
+     *         nextIndexedKey.
      */
     public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
       boolean cacheBlocks, boolean pread, boolean isCompaction,
@@ -665,8 +665,8 @@ public class HFileBlockIndex {
     }
 
     /**
-     * Finds the root-level index block containing the given key. n * Key to find n * the comparator
-     * to be used
+     * Finds the root-level index block containing the given key.
+     * @param key  Key to find
+     * @param comp the comparator to be used
      * @return Offset of block containing <code>key</code> (between 0 and the number of blocks - 1)
      *         or -1 if this file does not contain the request.
      */
@@ -677,7 +677,7 @@ public class HFileBlockIndex {
       CellComparator comp);
 
     /**
-     * Finds the root-level index block containing the given key. n * Key to find
+     * Finds the root-level index block containing the given key.
+     * @param key Key to find
      * @return Offset of block containing <code>key</code> (between 0 and the number of blocks - 1)
      *         or -1 if this file does not contain the request.
      */
@@ -690,13 +690,13 @@ public class HFileBlockIndex {
     }
 
     /**
-     * Finds the root-level index block containing the given key. n * Key to find
+     * Finds the root-level index block containing the given key.
+     * @param key Key to find
      */
     public abstract int rootBlockContainingKey(final Cell key);
... 10805 lines suppressed ...