Posted to commits@storm.apache.org by sr...@apache.org on 2019/07/07 16:51:44 UTC

[storm] branch master updated: STORM-3440: client: fix all checkstyle warnings (#3055)

This is an automated email from the ASF dual-hosted git repository.

srdo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/storm.git


The following commit(s) were added to refs/heads/master by this push:
     new 925422a  STORM-3440: client: fix all checkstyle warnings (#3055)
925422a is described below

commit 925422a5b5ad1c3329a2c2b44db460ae94f70806
Author: Karl-Philipp Richter <kr...@posteo.de>
AuthorDate: Sun Jul 7 18:51:36 2019 +0200

    STORM-3440: client: fix all checkstyle warnings (#3055)
    
    * STORM-3440: client: fix all checkstyle warnings
---
 .../storm/starter/streams/AggregateExample.java    |   8 +-
 .../storm/starter/streams/TypedTupleExample.java   |   2 +-
 .../java/org/apache/storm/hdfs/spout/Configs.java  |  44 +-
 storm-client/pom.xml                               |   2 +-
 storm-client/src/jvm/org/apache/storm/Config.java  | 814 +++++++++++----------
 .../src/jvm/org/apache/storm/ILocalCluster.java    |  29 +-
 .../src/jvm/org/apache/storm/ILocalDRPC.java       |   7 +-
 .../src/jvm/org/apache/storm/StormSubmitter.java   |  99 ++-
 .../src/jvm/org/apache/storm/StormTimer.java       |   8 +-
 storm-client/src/jvm/org/apache/storm/Thrift.java  |  14 +-
 .../storm/annotation/InterfaceStability.java       |   1 -
 .../assignments/InMemoryAssignmentBackend.java     |   1 +
 .../jvm/org/apache/storm/blobstore/BlobStore.java  | 279 +++----
 .../storm/blobstore/BlobStoreAclHandler.java       |  52 +-
 .../apache/storm/blobstore/ClientBlobStore.java    |  95 +--
 .../apache/storm/blobstore/NimbusBlobStore.java    |  13 +-
 .../src/jvm/org/apache/storm/bolt/JoinBolt.java    |  38 +-
 .../storm/callback/ZKStateChangedCallback.java     |   1 +
 .../jvm/org/apache/storm/cluster/ClusterUtils.java |  28 +-
 .../org/apache/storm/cluster/IStateStorage.java    |  16 +-
 .../apache/storm/cluster/IStormClusterState.java   |   7 +
 .../storm/cluster/PaceMakerStateStorage.java       |  12 +-
 .../cluster/PaceMakerStateStorageFactory.java      |   4 +-
 .../apache/storm/cluster/StateStorageFactory.java  |   2 +-
 .../storm/cluster/StormClusterStateImpl.java       |  19 +-
 .../org/apache/storm/cluster/ZKStateStorage.java   |   1 +
 .../storm/cluster/ZKStateStorageFactory.java       |   5 +-
 .../storm/container/cgroup/CgroupCenter.java       |   6 +-
 .../storm/container/cgroup/CgroupCommon.java       |   3 -
 .../container/cgroup/CgroupCommonOperation.java    |  22 +-
 .../storm/container/cgroup/CgroupOperation.java    |  20 +-
 .../apache/storm/container/cgroup/CgroupUtils.java |   4 +-
 .../org/apache/storm/container/cgroup/Device.java  |   2 +-
 .../apache/storm/container/cgroup/Hierarchy.java   |   6 +-
 .../apache/storm/container/cgroup/SubSystem.java   |   9 +-
 .../storm/container/cgroup/SubSystemType.java      |   2 +-
 .../storm/container/cgroup/SystemOperation.java    |  16 +-
 .../storm/container/cgroup/core/BlkioCore.java     |  12 +
 .../storm/coordination/BatchBoltExecutor.java      |  36 +-
 .../storm/coordination/BatchOutputCollector.java   |   2 +-
 .../coordination/BatchOutputCollectorImpl.java     |  16 +-
 .../apache/storm/coordination/CoordinatedBolt.java | 166 +++--
 .../org/apache/storm/daemon/GrouperFactory.java    |   2 +-
 .../jvm/org/apache/storm/daemon/StormCommon.java   |   6 +-
 .../storm/daemon/supervisor/AdvancedFSOps.java     |  55 +-
 .../daemon/supervisor/ClientSupervisorUtils.java   |   5 +-
 .../storm/daemon/supervisor/ExitCodeCallback.java  |   2 +-
 .../storm/daemon/supervisor/IAdvancedFSOps.java    |  31 +-
 .../storm/daemon/worker/BackPressureTracker.java   |  22 +-
 .../jvm/org/apache/storm/daemon/worker/Worker.java |  43 +-
 .../apache/storm/daemon/worker/WorkerState.java    |  49 +-
 .../storm/dependency/DependencyBlobStoreUtils.java |   1 +
 .../storm/dependency/DependencyUploader.java       |   4 +-
 .../apache/storm/drpc/DRPCInvocationsClient.java   |   5 +-
 .../src/jvm/org/apache/storm/drpc/DRPCSpout.java   |  78 +-
 .../src/jvm/org/apache/storm/drpc/JoinResult.java  |  16 +-
 .../jvm/org/apache/storm/drpc/KeyedFairBolt.java   |  40 +-
 .../apache/storm/drpc/LinearDRPCInputDeclarer.java |   1 +
 .../storm/drpc/LinearDRPCTopologyBuilder.java      |   2 +-
 .../jvm/org/apache/storm/drpc/ReturnResults.java   |  36 +-
 .../jvm/org/apache/storm/executor/Executor.java    |  58 +-
 .../apache/storm/executor/ExecutorTransfer.java    |   2 +-
 .../apache/storm/executor/bolt/BoltExecutor.java   |  12 +-
 .../executor/bolt/BoltOutputCollectorImpl.java     |   6 +-
 .../apache/storm/executor/spout/SpoutExecutor.java |   6 +-
 .../executor/spout/SpoutOutputCollectorImpl.java   |   8 +-
 .../storm/grouping/CustomStreamGrouping.java       |   2 +-
 .../src/jvm/org/apache/storm/grouping/Load.java    |   6 +-
 .../jvm/org/apache/storm/grouping/LoadMapping.java |  16 +-
 .../apache/storm/grouping/PartialKeyGrouping.java  |  16 +-
 .../jvm/org/apache/storm/hooks/BaseWorkerHook.java |   8 +-
 .../jvm/org/apache/storm/hooks/IWorkerHook.java    |   4 +-
 .../storm/messaging/ConnectionWithStatus.java      |   2 +-
 .../messaging/DeserializingConnectionCallback.java |   4 +-
 .../org/apache/storm/messaging/IConnection.java    |  10 +-
 .../storm/messaging/IConnectionCallback.java       |   2 +-
 .../jvm/org/apache/storm/messaging/IContext.java   |  20 +-
 .../org/apache/storm/messaging/TaskMessage.java    |  24 +-
 .../apache/storm/messaging/TransportFactory.java   |   8 +-
 .../org/apache/storm/messaging/local/Context.java  |  55 +-
 .../org/apache/storm/messaging/netty/Client.java   |   8 +-
 .../org/apache/storm/messaging/netty/Context.java  |  12 +-
 .../storm/messaging/netty/ControlMessage.java      |   9 +-
 .../messaging/netty/KerberosSaslClientHandler.java |  14 +-
 .../messaging/netty/KerberosSaslNettyClient.java   |  27 +-
 .../netty/KerberosSaslNettyClientState.java        |   3 +-
 .../messaging/netty/KerberosSaslNettyServer.java   |  34 +-
 .../netty/KerberosSaslNettyServerState.java        |   3 +-
 .../messaging/netty/KerberosSaslServerHandler.java |   8 +-
 .../org/apache/storm/messaging/netty/Login.java    |  78 +-
 .../apache/storm/messaging/netty/MessageBatch.java |  39 +-
 .../storm/messaging/netty/MessageDecoder.java      |   8 +-
 .../storm/messaging/netty/SaslMessageToken.java    |  39 +-
 .../storm/messaging/netty/SaslNettyClient.java     |   5 +-
 .../storm/messaging/netty/SaslNettyServer.java     |   4 +-
 .../messaging/netty/SaslStormClientHandler.java    |   7 +-
 .../messaging/netty/SaslStormServerHandler.java    |   1 +
 .../org/apache/storm/messaging/netty/Server.java   |  16 +-
 .../storm/messaging/netty/StormClientHandler.java  |   6 +-
 .../storm/messaging/netty/StormServerHandler.java  |   6 +-
 .../org/apache/storm/metric/EventLoggerBolt.java   |   8 +-
 .../jvm/org/apache/storm/metric/IEventLogger.java  |   2 +-
 .../storm/metric/LoggingMetricsConsumer.java       |  16 +-
 .../apache/storm/metric/MetricsConsumerBolt.java   |  77 +-
 .../jvm/org/apache/storm/metric/SystemBolt.java    |  42 +-
 .../apache/storm/metric/api/AssignableMetric.java  |   8 +-
 .../apache/storm/metric/api/CombinedMetric.java    |  14 +-
 .../org/apache/storm/metric/api/CountMetric.java   |  10 +-
 .../jvm/org/apache/storm/metric/api/IMetric.java   |   1 +
 .../apache/storm/metric/api/IMetricsConsumer.java  |   6 +-
 .../org/apache/storm/metric/api/MeanReducer.java   |   5 -
 .../api/MeanReducerState.java}                     |  19 +-
 .../apache/storm/metric/api/MultiCountMetric.java  |   8 +-
 .../storm/metric/api/MultiReducedMetric.java       |  12 +-
 .../org/apache/storm/metric/api/ReducedMetric.java |  14 +-
 .../org/apache/storm/metric/api/StateMetric.java   |   6 +-
 .../storm/metric/api/rpc/CountShellMetric.java     |  11 +-
 .../apache/storm/metric/api/rpc/IShellMetric.java  |   9 +-
 .../org/apache/storm/metric/cgroup/CGroupCpu.java  |   4 +-
 .../storm/metric/cgroup/CGroupMemoryLimit.java     |  10 +-
 .../storm/metric/cgroup/CGroupMemoryUsage.java     |   2 +-
 .../storm/metric/internal/CountStatAndMetric.java  | 114 +--
 .../metric/internal/LatencyStatAndMetric.java      | 179 ++---
 .../storm/metric/internal/MetricStatTimer.java     |   4 +-
 .../metric/internal/MultiCountStatAndMetric.java   |  24 +-
 .../metric/internal/MultiLatencyStatAndMetric.java |  24 +-
 .../apache/storm/metric/internal/RateTracker.java  |  69 +-
 .../jvm/org/apache/storm/multilang/BoltMsg.java    |   5 +-
 .../org/apache/storm/multilang/ISerializer.java    |  10 +-
 .../org/apache/storm/multilang/JsonSerializer.java |   4 +-
 .../jvm/org/apache/storm/multilang/ShellMsg.java   |  28 +-
 .../jvm/org/apache/storm/multilang/SpoutMsg.java   |   2 +-
 .../AbstractDNSToSwitchMapping.java                |  10 +-
 .../networktopography/DNSToSwitchMapping.java      |  18 +-
 .../DefaultRackDNSToSwitchMapping.java             |   1 +
 .../jvm/org/apache/storm/nimbus/NimbusInfo.java    |  10 +-
 .../apache/storm/pacemaker/PacemakerClient.java    |  42 +-
 .../storm/pacemaker/codec/ThriftDecoder.java       |   2 +-
 .../storm/pacemaker/codec/ThriftEncoder.java       |  22 +-
 .../pacemaker/codec/ThriftNettyClientCodec.java    |  50 +-
 .../jvm/org/apache/storm/policy/IWaitStrategy.java |  19 +-
 .../org/apache/storm/policy/WaitStrategyPark.java  |   8 +-
 .../storm/policy/WaitStrategyProgressive.java      |  17 +-
 .../org/apache/storm/security/auth/AutoSSL.java    |  17 +-
 .../storm/security/auth/ClientAuthUtils.java       |  55 +-
 .../security/auth/DefaultPrincipalToLocal.java     |   2 +-
 .../storm/security/auth/FixedGroupsMapping.java    |  10 +-
 .../apache/storm/security/auth/IAuthorizer.java    |   8 +-
 .../auth/IGroupMappingServiceProvider.java         |   6 +-
 .../storm/security/auth/IPrincipalToLocal.java     |   6 +-
 .../storm/security/auth/ITransportPlugin.java      |  12 +-
 .../security/auth/KerberosPrincipalToLocal.java    |   4 +-
 .../org/apache/storm/security/auth/ReqContext.java |  50 +-
 .../security/auth/ShellBasedGroupsMapping.java     |   4 +-
 .../storm/security/auth/SimpleTransportPlugin.java |  44 +-
 .../storm/security/auth/SingleUserPrincipal.java   |  10 +-
 .../storm/security/auth/TBackoffConnect.java       |  18 +-
 .../apache/storm/security/auth/ThriftClient.java   |  69 +-
 .../storm/security/auth/ThriftConnectionType.java  |  14 +-
 .../apache/storm/security/auth/ThriftServer.java   |   2 +
 .../auth/authorizer/DRPCAuthorizerBase.java        |  14 +-
 .../auth/authorizer/DRPCSimpleACLAuthorizer.java   |  74 +-
 .../security/auth/authorizer/DenyAuthorizer.java   |   6 +-
 .../auth/authorizer/ImpersonationAuthorizer.java   |  48 +-
 .../security/auth/authorizer/NoopAuthorizer.java   |   4 +-
 .../auth/authorizer/SimpleACLAuthorizer.java       |   1 +
 .../auth/authorizer/SimpleWhitelistAuthorizer.java |   4 +-
 .../authorizer/SupervisorSimpleACLAuthorizer.java  |   1 +
 .../storm/security/auth/kerberos/AutoTGT.java      |  31 +-
 .../auth/kerberos/AutoTGTKrb5LoginModule.java      |   3 +-
 .../auth/kerberos/AutoTGTKrb5LoginModuleTest.java  |   1 +
 .../auth/kerberos/ClientCallbackHandler.java       |  39 +-
 .../auth/kerberos/KerberosSaslTransportPlugin.java |  70 +-
 .../auth/kerberos/ServerCallbackHandler.java       |   8 +-
 .../auth/plain/PlainSaslTransportPlugin.java       |   2 +-
 .../security/auth/sasl/SaslTransportPlugin.java    |   1 +
 .../serialization/BlowfishTupleSerializer.java     |  11 +-
 .../storm/serialization/DefaultKryoFactory.java    |   6 +-
 .../GzipBridgeThriftSerializationDelegate.java     |   2 +-
 .../apache/storm/serialization/IKryoFactory.java   |  12 +-
 .../storm/serialization/KryoTupleDeserializer.java |  32 +-
 .../storm/serialization/KryoTupleSerializer.java   |  34 +-
 .../serialization/KryoValuesDeserializer.java      |  18 +-
 .../storm/serialization/KryoValuesSerializer.java  |  28 +-
 .../storm/serialization/SerializationFactory.java  |   4 +-
 .../storm/serialization/SerializationRegister.java |   2 +-
 .../org/apache/storm/spout/CheckPointState.java    |  25 +-
 .../org/apache/storm/spout/CheckpointSpout.java    |  11 +-
 .../src/jvm/org/apache/storm/spout/ISpout.java     |  29 +-
 .../jvm/org/apache/storm/spout/RawMultiScheme.java |   4 +-
 .../src/jvm/org/apache/storm/spout/ShellSpout.java |  86 +--
 .../apache/storm/spout/SpoutOutputCollector.java   |  20 +-
 .../org/apache/storm/state/BaseStateIterator.java  |  34 +-
 .../apache/storm/state/DefaultStateEncoder.java    |   9 +-
 .../apache/storm/state/InMemoryKeyValueState.java  |  42 +-
 .../storm/state/InMemoryKeyValueStateProvider.java |   2 +-
 .../jvm/org/apache/storm/state/KeyValueState.java  |   6 +-
 .../jvm/org/apache/storm/state/StateEncoder.java   |  20 +-
 .../jvm/org/apache/storm/state/StateFactory.java   |   6 +-
 .../org/apache/storm/stats/ClientStatsUtil.java    |  15 +-
 .../src/jvm/org/apache/storm/streams/Edge.java     |   2 +-
 .../jvm/org/apache/storm/streams/GroupingInfo.java |   6 +-
 .../src/jvm/org/apache/storm/streams/Node.java     |  28 +-
 .../src/jvm/org/apache/storm/streams/Pair.java     |  28 +-
 .../jvm/org/apache/storm/streams/PairStream.java   |  90 +--
 .../org/apache/storm/streams/ProcessorBolt.java    |   2 +-
 .../storm/streams/ProcessorBoltDelegate.java       |   4 +-
 .../org/apache/storm/streams/ProcessorNode.java    |  13 +-
 .../org/apache/storm/streams/RefCountedTuple.java  |   8 +-
 .../jvm/org/apache/storm/streams/SpoutNode.java    |   4 +-
 .../storm/streams/StatefulProcessorBolt.java       |   4 +-
 .../src/jvm/org/apache/storm/streams/Stream.java   |  26 +-
 .../org/apache/storm/streams/StreamBuilder.java    |  60 +-
 .../jvm/org/apache/storm/streams/StreamState.java  |   2 +-
 .../jvm/org/apache/storm/streams/StreamUtil.java   |   4 +-
 .../storm/streams/WindowedProcessorBolt.java       |   4 +-
 .../streams/operations/aggregators/LongSum.java    |   2 +-
 .../operations/mappers/TupleValueMappers.java      |  24 +-
 .../streams/operations/mappers/ValuesMapper.java   |   2 +-
 .../processors/AggregateByKeyProcessor.java        |  10 +-
 .../streams/processors/AggregateProcessor.java     |  10 +-
 .../storm/streams/processors/BaseProcessor.java    |  26 +-
 .../processors/ChainedProcessorContext.java        |   2 +-
 .../streams/processors/CoGroupByKeyProcessor.java  |   2 +-
 .../processors/EmittingProcessorContext.java       |   4 +-
 .../processors/ForwardingProcessorContext.java     |   4 +-
 .../storm/streams/processors/JoinProcessor.java    |   4 +-
 .../streams/processors/StatefulProcessor.java      |   2 +-
 .../org/apache/storm/streams/tuple/Tuple10.java    | 115 +--
 .../jvm/org/apache/storm/streams/tuple/Tuple3.java |  34 +-
 .../jvm/org/apache/storm/streams/tuple/Tuple4.java |  44 +-
 .../jvm/org/apache/storm/streams/tuple/Tuple5.java |  54 +-
 .../jvm/org/apache/storm/streams/tuple/Tuple6.java |  64 +-
 .../jvm/org/apache/storm/streams/tuple/Tuple7.java |  74 +-
 .../jvm/org/apache/storm/streams/tuple/Tuple8.java |  84 +--
 .../jvm/org/apache/storm/streams/tuple/Tuple9.java | 104 +--
 .../storm/streams/windowing/TumblingWindows.java   |   4 +-
 .../org/apache/storm/streams/windowing/Window.java |   4 +-
 .../apache/storm/task/GeneralTopologyContext.java  |  50 +-
 .../src/jvm/org/apache/storm/task/IBolt.java       |  21 +-
 .../jvm/org/apache/storm/task/IMetricsContext.java |   3 +
 .../jvm/org/apache/storm/task/OutputCollector.java |  34 +-
 .../src/jvm/org/apache/storm/task/ShellBolt.java   | 152 ++--
 .../jvm/org/apache/storm/task/TopologyContext.java | 114 +--
 .../apache/storm/task/WorkerTopologyContext.java   |  43 +-
 .../apache/storm/testing/AckFailMapTracker.java    |  16 +-
 .../jvm/org/apache/storm/testing/AckTracker.java   |  12 +-
 .../testing/AlternateRackDNSToSwitchMapping.java   |   1 +
 .../jvm/org/apache/storm/testing/BoltTracker.java  |   6 +-
 .../org/apache/storm/testing/CompletableSpout.java |  11 +-
 .../jvm/org/apache/storm/testing/FeederSpout.java  |  36 +-
 .../org/apache/storm/testing/FixedTupleSpout.java  |  72 +-
 .../storm/testing/ForwardingMetricsConsumer.java   |  14 +-
 .../jvm/org/apache/storm/testing/IdentityBolt.java |   6 +-
 .../org/apache/storm/testing/MkClusterParam.java   |   8 +-
 .../jvm/org/apache/storm/testing/NGrouping.java    |  14 +-
 .../apache/storm/testing/NonRichBoltTracker.java   |  16 +-
 .../org/apache/storm/testing/PrepareBatchBolt.java |   6 +-
 .../storm/testing/PythonShellMetricsBolt.java      |   4 +-
 .../storm/testing/PythonShellMetricsSpout.java     |   4 +-
 .../jvm/org/apache/storm/testing/SpoutTracker.java |  44 +-
 .../storm/testing/TestAggregatesCounter.java       |  22 +-
 .../jvm/org/apache/storm/testing/TestConfBolt.java |  12 +-
 .../apache/storm/testing/TestEventLogSpout.java    |  10 +-
 .../storm/testing/TestEventOrderCheckBolt.java     |  12 +-
 .../org/apache/storm/testing/TestGlobalCount.java  |  14 +-
 .../org/apache/storm/testing/TestPlannerSpout.java |  12 +-
 .../org/apache/storm/testing/TestWordCounter.java  |  14 +-
 .../org/apache/storm/testing/TestWordSpout.java    |  12 +-
 .../org/apache/storm/testing/TupleCaptureBolt.java |  22 +-
 .../storm/topology/BaseConfigurationDeclarer.java  |   8 +-
 .../storm/topology/BaseStatefulBoltExecutor.java   |  24 +-
 .../apache/storm/topology/BasicBoltExecutor.java   |  26 +-
 .../storm/topology/CheckpointTupleForwarder.java   |   7 +-
 .../storm/topology/ConfigurableTopology.java       |  19 +-
 .../jvm/org/apache/storm/topology/IBasicBolt.java  |   2 +-
 .../org/apache/storm/topology/IStatefulBolt.java   |   3 +
 .../org/apache/storm/topology/InputDeclarer.java   |  73 +-
 .../apache/storm/topology/OutputFieldsGetter.java  |   8 +-
 .../topology/PersistentWindowedBoltExecutor.java   |   4 +-
 .../apache/storm/topology/ResourceDeclarer.java    |   7 +-
 .../storm/topology/SharedOffHeapWithinNode.java    |   4 +-
 .../storm/topology/SharedOffHeapWithinWorker.java  |   2 +-
 .../org/apache/storm/topology/SharedOnHeap.java    |   2 +-
 .../storm/topology/StatefulBoltExecutor.java       |  12 +-
 .../topology/StatefulWindowedBoltExecutor.java     |  31 +-
 .../org/apache/storm/topology/TopologyBuilder.java | 192 ++---
 .../topology/TupleFieldTimestampExtractor.java     |   6 +-
 .../storm/topology/WindowedBoltExecutor.java       |  25 +-
 .../topology/base/BaseTickTupleAwareRichBolt.java  |   2 +-
 .../storm/topology/base/BaseWindowedBolt.java      |  12 +-
 .../storm/transactional/TransactionAttempt.java    |  18 +-
 .../apache/storm/trident/JoinOutFieldsMode.java    |  10 +-
 .../src/jvm/org/apache/storm/trident/Stream.java   | 400 +++++-----
 .../jvm/org/apache/storm/trident/TridentState.java |  20 +-
 .../org/apache/storm/trident/TridentTopology.java  |  99 +--
 .../storm/trident/drpc/ReturnResultsReducer.java   |  20 +-
 .../trident/fluent/ChainedAggregatorDeclarer.java  |  50 +-
 .../apache/storm/trident/fluent/GroupedStream.java |  62 +-
 .../apache/storm/trident/fluent/UniqueIdGen.java   |  12 +-
 .../jvm/org/apache/storm/trident/graph/Group.java  |  47 +-
 .../apache/storm/trident/operation/Assembly.java   |   9 +-
 .../storm/trident/operation/BaseOperation.java     |   2 +-
 .../trident/operation/DefaultResourceDeclarer.java |   5 +-
 .../org/apache/storm/trident/operation/Filter.java |   6 +-
 .../storm/trident/operation/FlatMapFunction.java   |   2 +-
 .../apache/storm/trident/operation/Function.java   |  30 +-
 .../storm/trident/operation/ITridentResource.java  |   2 +
 .../storm/trident/operation/MapFunction.java       |   2 +-
 .../apache/storm/trident/operation/Operation.java  |   2 +-
 .../operation/OperationAwareFlatMapFunction.java   |   2 +-
 .../operation/OperationAwareMapFunction.java       |   2 +-
 .../storm/trident/operation/TridentCollector.java  |  14 +-
 .../operation/TridentMultiReducerContext.java      |   6 +-
 .../trident/operation/TridentOperationContext.java |  32 +-
 .../storm/trident/operation/builtin/Debug.java     |   4 +-
 .../storm/trident/operation/builtin/FirstN.java    |  40 +-
 .../operation/builtin/MaxWithComparator.java       |   6 +-
 .../operation/builtin/MinWithComparator.java       |   6 +-
 .../storm/trident/operation/builtin/Negate.java    |  24 +-
 .../trident/operation/impl/CaptureCollector.java   |   8 +-
 .../operation/impl/ChainedAggregatorImpl.java      |  50 +-
 .../operation/impl/CombinerAggStateUpdater.java    |   6 +-
 .../impl/CombinerAggregatorCombineImpl.java        |   8 +-
 .../operation/impl/CombinerAggregatorInitImpl.java |   6 +-
 .../trident/operation/impl/FilterExecutor.java     |  10 +-
 .../trident/operation/impl/GroupCollector.java     |  14 +-
 .../trident/operation/impl/GroupedAggregator.java  |  42 +-
 .../impl/GroupedMultiReducerExecutor.java          |  36 +-
 .../trident/operation/impl/JoinerMultiReducer.java |  32 +-
 .../PreservingFieldsOrderJoinerMultiReducer.java   |  44 +-
 .../operation/impl/ReducerAggStateUpdater.java     |   6 +-
 .../operation/impl/ReducerAggregatorImpl.java      |   8 +-
 .../operation/impl/SingleEmitAggregator.java       |  24 +-
 .../storm/trident/partition/IdentityGrouping.java  |   6 +-
 .../storm/trident/partition/IndexHashGrouping.java |  12 +-
 .../storm/trident/planner/BridgeReceiver.java      |   8 +-
 .../storm/trident/planner/SubtopologyBolt.java     | 100 +--
 .../planner/processor/AggregateProcessor.java      |  40 +-
 .../trident/planner/processor/AppendCollector.java |  20 +-
 .../trident/planner/processor/EachProcessor.java   |  32 +-
 .../trident/planner/processor/FreshCollector.java  |  20 +-
 .../trident/planner/processor/MapProcessor.java    |  32 +-
 .../planner/processor/MultiReducerProcessor.java   |  52 +-
 .../processor/PartitionPersistProcessor.java       |  48 +-
 .../planner/processor/ProjectedProcessor.java      |  22 +-
 .../planner/processor/StateQueryProcessor.java     |  48 +-
 .../storm/trident/spout/BatchSpoutExecutor.java    |  16 +-
 .../org/apache/storm/trident/spout/IBatchID.java   |   1 +
 .../spout/IOpaquePartitionedTridentSpout.java      |  29 +-
 .../trident/spout/IPartitionedTridentSpout.java    |  26 +-
 .../apache/storm/trident/spout/ITridentSpout.java  |  32 +-
 .../OpaquePartitionedTridentSpoutExecutor.java     | 108 +--
 .../spout/PartitionedTridentSpoutExecutor.java     |  79 +-
 .../trident/spout/RichSpoutBatchExecutor.java      |  54 +-
 .../storm/trident/spout/RichSpoutBatchId.java      |   8 +-
 .../trident/spout/RichSpoutBatchIdSerializer.java  |   2 +-
 .../trident/spout/RichSpoutBatchTriggerer.java     |  84 +--
 .../trident/spout/TridentSpoutCoordinator.java     |  34 +-
 .../storm/trident/spout/TridentSpoutExecutor.java  |  64 +-
 .../state/JSONNonTransactionalSerializer.java      |   1 +
 .../storm/trident/state/JSONOpaqueSerializer.java  |   1 +
 .../trident/state/JSONTransactionalSerializer.java |   1 +
 .../jvm/org/apache/storm/trident/state/State.java  |   6 +-
 .../trident/state/map/CachedBatchReadsMap.java     |  18 +-
 .../apache/storm/trident/state/map/CachedMap.java  |  24 +-
 .../state/map/MapCombinerAggStateUpdater.java      |  30 +-
 .../state/map/MapReducerAggStateUpdater.java       |  32 +-
 .../trident/state/map/MicroBatchIBackingMap.java   |  20 +-
 .../trident/state/map/NonTransactionalMap.java     |  12 +-
 .../apache/storm/trident/state/map/OpaqueMap.java  |  34 +-
 .../storm/trident/state/map/SnapshottableMap.java  |  24 +-
 .../storm/trident/state/map/TransactionalMap.java  |  30 +-
 .../storm/trident/testing/FeederBatchSpout.java    |  60 +-
 .../trident/testing/FeederCommitterBatchSpout.java |  26 +-
 .../storm/trident/testing/LRUMemoryMapState.java   |  48 +-
 .../storm/trident/testing/MemoryBackingMap.java    |   6 +-
 .../storm/trident/testing/MemoryMapState.java      |  56 +-
 .../trident/topology/MasterBatchCoordinator.java   | 136 ++--
 .../storm/trident/topology/TransactionAttempt.java |  20 +-
 .../trident/topology/TridentBoltExecutor.java      | 162 ++--
 .../trident/topology/TridentTopologyBuilder.java   |  14 +-
 .../topology/state/RotatingTransactionalState.java |  76 +-
 .../trident/topology/state/TransactionalState.java |  33 +-
 .../org/apache/storm/trident/tuple/ComboList.java  |  14 +-
 .../org/apache/storm/trident/tuple/ConsList.java   |  20 +-
 .../storm/trident/tuple/TridentTupleView.java      |  88 +--
 .../jvm/org/apache/storm/trident/util/LRUMap.java  |   7 +-
 .../apache/storm/trident/util/TridentUtils.java    |   2 +-
 .../windowing/AbstractTridentWindowManager.java    |  13 +-
 .../trident/windowing/ITridentWindowManager.java   |   9 +-
 .../trident/windowing/InMemoryWindowsStore.java    |  21 +-
 .../storm/trident/windowing/TridentBatchTuple.java |   3 -
 .../trident/windowing/WindowTridentProcessor.java  |  28 +-
 .../trident/windowing/WindowsStateUpdater.java     |   2 +-
 .../trident/windowing/WindowsStoreFactory.java     |   2 +-
 .../trident/windowing/config/BaseWindowConfig.java |   3 -
 .../trident/windowing/config/WindowConfig.java     |   6 -
 .../windowing/strategy/BaseWindowStrategy.java     |   3 -
 .../strategy/SlidingCountWindowStrategy.java       |   6 -
 .../strategy/SlidingDurationWindowStrategy.java    |   6 -
 .../strategy/TumblingCountWindowStrategy.java      |   6 -
 .../strategy/TumblingDurationWindowStrategy.java   |   6 -
 .../trident/windowing/strategy/WindowStrategy.java |   6 -
 .../src/jvm/org/apache/storm/tuple/Fields.java     |  37 +-
 .../src/jvm/org/apache/storm/tuple/MessageId.java  |  20 +-
 .../src/jvm/org/apache/storm/tuple/Tuple.java      |   4 +-
 .../src/jvm/org/apache/storm/tuple/TupleImpl.java  |  35 +-
 .../org/apache/storm/utils/CRC32OutputStream.java  |   1 +
 .../jvm/org/apache/storm/utils/ConfigUtils.java    |  71 +-
 .../jvm/org/apache/storm/utils/CuratorUtils.java   |  35 +-
 .../src/jvm/org/apache/storm/utils/DRPCClient.java |  29 +-
 .../org/apache/storm/utils/InprocMessaging.java    |   4 +-
 .../src/jvm/org/apache/storm/utils/JCQueue.java    |  54 +-
 .../apache/storm/utils/KeyedRoundRobinQueue.java   |  40 +-
 .../jvm/org/apache/storm/utils/ListDelegate.java   |  70 +-
 .../src/jvm/org/apache/storm/utils/LocalState.java |  16 +-
 .../jvm/org/apache/storm/utils/MutableObject.java  |  11 +-
 .../jvm/org/apache/storm/utils/NimbusClient.java   |   6 +-
 .../jvm/org/apache/storm/utils/ObjectReader.java   |  36 +-
 .../jvm/org/apache/storm/utils/RotatingMap.java    |  36 +-
 .../apache/storm/utils/ShellBoltMessageQueue.java  |   9 +-
 .../org/apache/storm/utils/ShellCommandRunner.java |   2 +-
 .../jvm/org/apache/storm/utils/ShellProcess.java   |  26 +-
 .../src/jvm/org/apache/storm/utils/ShellUtils.java |  46 +-
 .../jvm/org/apache/storm/utils/SimpleVersion.java  |  20 +-
 .../utils/StormBoundedExponentialBackoffRetry.java |   2 +-
 .../org/apache/storm/utils/SupervisorClient.java   |  13 +-
 .../jvm/org/apache/storm/utils/TimeCacheMap.java   |  56 +-
 .../src/jvm/org/apache/storm/utils/Utils.java      | 112 ++-
 .../jvm/org/apache/storm/utils/VersionInfo.java    |   8 +-
 .../jvm/org/apache/storm/utils/VersionedStore.java |  16 +-
 .../apache/storm/utils/WindowedTimeThrottler.java  |  24 +-
 .../storm/utils/WrappedDRPCExecutionException.java |   2 +
 .../storm/utils/WrappedHBExecutionException.java   |   1 +
 .../jvm/org/apache/storm/utils/WritableUtils.java  |  43 +-
 .../apache/storm/validation/ConfigValidation.java  |  46 +-
 .../validation/ConfigValidationAnnotations.java    |  58 +-
 .../storm/validation/ConfigValidationUtils.java    |   9 +-
 .../jvm/org/apache/storm/validation/NotConf.java   |   2 +-
 .../jvm/org/apache/storm/validation/Validated.java |   2 +-
 .../storm/windowing/CountEvictionPolicy.java       |   8 +-
 .../apache/storm/windowing/CountTriggerPolicy.java |  10 +-
 .../jvm/org/apache/storm/windowing/EventImpl.java  |   8 +-
 .../apache/storm/windowing/EvictionContext.java    |   6 +-
 .../storm/windowing/StatefulWindowManager.java     |  10 +-
 .../apache/storm/windowing/TimeEvictionPolicy.java |  14 +-
 .../apache/storm/windowing/TimeTriggerPolicy.java  |   6 +-
 .../apache/storm/windowing/TupleWindowImpl.java    |  10 +-
 .../windowing/WatermarkCountEvictionPolicy.java    |   3 +-
 .../windowing/WatermarkCountTriggerPolicy.java     |  10 +-
 .../windowing/WatermarkTimeEvictionPolicy.java     |   7 +-
 .../windowing/WatermarkTimeTriggerPolicy.java      |  10 +-
 .../storm/windowing/WindowLifecycleListener.java   |   6 +-
 .../org/apache/storm/windowing/WindowManager.java  |  24 +-
 .../apache/storm/zookeeper/ClientZookeeper.java    |   2 +-
 .../jvm/org/apache/storm/TestConfigValidate.java   |  32 +-
 .../main/java/org/apache/storm/DaemonConfig.java   | 410 +++++------
 .../org/apache/storm/daemon/nimbus/Nimbus.java     |   3 -
 459 files changed, 6017 insertions(+), 6093 deletions(-)

diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/AggregateExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/AggregateExample.java
index 34ffd7c..626ac8f 100644
--- a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/AggregateExample.java
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/AggregateExample.java
@@ -57,21 +57,21 @@ public class AggregateExample {
 
         @Override
         public Pair<Integer, Integer> apply(Pair<Integer, Integer> sumAndCount, Integer value) {
-            return Pair.of(sumAndCount._1 + value, sumAndCount._2 + 1);
+            return Pair.of(sumAndCount.value1 + value, sumAndCount.value2 + 1);
         }
 
         @Override
         public Pair<Integer, Integer> merge(Pair<Integer, Integer> sumAndCount1, Pair<Integer, Integer> sumAndCount2) {
             System.out.println("Merge " + sumAndCount1 + " and " + sumAndCount2);
             return Pair.of(
-                sumAndCount1._1 + sumAndCount2._1,
-                sumAndCount1._2 + sumAndCount2._2
+                sumAndCount1.value1 + sumAndCount2.value1,
+                sumAndCount1.value2 + sumAndCount2.value2
             );
         }
 
         @Override
         public Double result(Pair<Integer, Integer> sumAndCount) {
-            return (double) sumAndCount._1 / sumAndCount._2;
+            return (double) sumAndCount.value1 / sumAndCount.value2;
         }
     }
 }
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/TypedTupleExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/TypedTupleExample.java
index 481758f..e37e666 100644
--- a/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/TypedTupleExample.java
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/streams/TypedTupleExample.java
@@ -37,7 +37,7 @@ public class TypedTupleExample {
         StreamBuilder builder = new StreamBuilder();
         Stream<Tuple3<Integer, Long, Long>> stream = builder.newStream(new RandomIntegerSpout(), TupleValueMappers.of(0, 1, 2));
 
-        PairStream<Long, Integer> pairs = stream.mapToPair(t -> Pair.of(t._2 / 10000, t._1));
+        PairStream<Long, Integer> pairs = stream.mapToPair(t -> Pair.of(t.value2 / 10000, t.value1));
 
         pairs.window(TumblingWindows.of(Count.of(10))).groupByKey().print();
 
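The two storm-starter diffs above reflect the rename of the public fields on org.apache.storm.streams.Pair from _1/_2 to value1/value2, presumably to satisfy checkstyle's MemberName check (field names must start with a lowercase letter, not an underscore). A minimal sketch of the new accessor usage, assuming only the renamed fields and the unchanged Pair.of(...) factory shown in the diff:

    import org.apache.storm.streams.Pair;

    public class PairRenameSketch {
        public static void main(String[] args) {
            // Pair.of(...) is unchanged; only the public fields moved
            // from _1/_2 to value1/value2.
            Pair<Integer, Integer> sumAndCount = Pair.of(10, 2);
            double mean = (double) sumAndCount.value1 / sumAndCount.value2;
            System.out.println(mean); // 5.0
        }
    }
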
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/Configs.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/Configs.java
index 9859d08..9c54a19 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/Configs.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/Configs.java
@@ -14,11 +14,11 @@ package org.apache.storm.hdfs.spout;
 
 import org.apache.storm.validation.ConfigValidation.Validator;
 import org.apache.storm.validation.ConfigValidationAnnotations.CustomValidator;
-import org.apache.storm.validation.ConfigValidationAnnotations.isBoolean;
-import org.apache.storm.validation.ConfigValidationAnnotations.isInteger;
-import org.apache.storm.validation.ConfigValidationAnnotations.isMapEntryType;
-import org.apache.storm.validation.ConfigValidationAnnotations.isPositiveNumber;
-import org.apache.storm.validation.ConfigValidationAnnotations.isString;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsBoolean;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsInteger;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryType;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsPositiveNumber;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsString;
 import org.apache.storm.validation.NotConf;
 import org.apache.storm.validation.Validated;
 
@@ -28,7 +28,7 @@ public class Configs implements Validated {
      * @deprecated please use {@link HdfsSpout#setReaderType(String)}
      */
     @Deprecated
-    @isString
+    @IsString
     @CustomValidator(validatorClass = ReaderTypeValidator.class)
     public static final String READER_TYPE = "hdfsspout.reader.type";
     public static final String TEXT = "text";
@@ -38,67 +38,67 @@ public class Configs implements Validated {
      * @deprecated please use {@link HdfsSpout#setHdfsUri(String)}
      */
     @Deprecated
-    @isString
+    @IsString
     public static final String HDFS_URI = "hdfsspout.hdfs";
     /**
      * Required - dir from which to read files.
      * @deprecated please use {@link HdfsSpout#setSourceDir(String)}
      */
     @Deprecated
-    @isString
+    @IsString
     public static final String SOURCE_DIR = "hdfsspout.source.dir";
     /**
      * Required - completed files will be moved here.
      * @deprecated please use {@link HdfsSpout#setArchiveDir(String)}
      */
     @Deprecated
-    @isString
+    @IsString
     public static final String ARCHIVE_DIR = "hdfsspout.archive.dir";
     /**
      * Required - unparsable files will be moved here.
      * @deprecated please use {@link HdfsSpout#setBadFilesDir(String)}
      */
     @Deprecated
-    @isString
+    @IsString
     public static final String BAD_DIR = "hdfsspout.badfiles.dir";
     /**
      * Directory in which lock files will be created.
      * @deprecated please use {@link HdfsSpout#setLockDir(String)}
      */
     @Deprecated
-    @isString
+    @IsString
     public static final String LOCK_DIR = "hdfsspout.lock.dir";
     /**
      * Commit after N records. 0 disables this.
      * @deprecated please use {@link HdfsSpout#setCommitFrequencyCount(int)}
      */
     @Deprecated
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String COMMIT_FREQ_COUNT = "hdfsspout.commit.count";
     /**
      * Commit after N secs. cannot be disabled.
      * @deprecated please use {@link HdfsSpout#setCommitFrequencySec(int)}
      */
     @Deprecated
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String COMMIT_FREQ_SEC = "hdfsspout.commit.sec";
     /**
      * Max outstanding.
      * @deprecated please use {@link HdfsSpout#setMaxOutstanding(int)}
      */
     @Deprecated
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String MAX_OUTSTANDING = "hdfsspout.max.outstanding";
     /**
      * Lock timeout.
      * @deprecated please use {@link HdfsSpout#setLockTimeoutSec(int)}
      */
     @Deprecated
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String LOCK_TIMEOUT = "hdfsspout.lock.timeout.sec";
     /**
      * If clocks on machines in the Storm cluster are in sync inactivity duration after which locks are considered
@@ -107,14 +107,14 @@ public class Configs implements Validated {
      * @deprecated please use {@link HdfsSpout#setClocksInSync(boolean)}
      */
     @Deprecated
-    @isBoolean
+    @IsBoolean
     public static final String CLOCKS_INSYNC = "hdfsspout.clocks.insync";
     /**
      * Ignore suffix.
      * @deprecated please use {@link HdfsSpout#setIgnoreSuffix(String)}
      */
     @Deprecated
-    @isString
+    @IsString
     public static final String IGNORE_SUFFIX = "hdfsspout.ignore.suffix";
     /**
      * Filenames with this suffix in archive dir will be ignored by the Spout.
@@ -125,7 +125,7 @@ public class Configs implements Validated {
     public static final int DEFAULT_COMMIT_FREQ_SEC = 10;
     public static final int DEFAULT_MAX_OUTSTANDING = 10000;
     public static final int DEFAULT_LOCK_TIMEOUT = 5 * 60; // 5 min
-    @isMapEntryType(keyType = String.class, valueType = String.class)
+    @IsMapEntryType(keyType = String.class, valueType = String.class)
     public static final String DEFAULT_HDFS_CONFIG_KEY = "hdfs.config";
 
     public static class ReaderTypeValidator extends Validator {
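Most of the churn in Configs.java, and in Config.java below, is the rename of the validation annotations from lowercase isString, isInteger, isPositiveNumber, etc. to UpperCamelCase IsString, IsInteger, IsPositiveNumber, since checkstyle's TypeName check requires annotation type names to begin with an uppercase letter. A minimal sketch of what one of the renamed annotation declarations might look like; the retention and target details here are illustrative assumptions, not copied from ConfigValidationAnnotations:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // Hypothetical shape of a renamed annotation; the real ones are
    // nested in org.apache.storm.validation.ConfigValidationAnnotations.
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.FIELD)
    public @interface IsString {
    }
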
diff --git a/storm-client/pom.xml b/storm-client/pom.xml
index 189267e..77c2dd9 100644
--- a/storm-client/pom.xml
+++ b/storm-client/pom.xml
@@ -166,7 +166,7 @@
                 <!--Note - the version would be inherited-->
                 <configuration>
                     <excludes>**/generated/**</excludes>
-                    <maxAllowedViolations>3067</maxAllowedViolations>
+                    <maxAllowedViolations>0</maxAllowedViolations>
                 </configuration>
             </plugin>
             <plugin>
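For context on the pom.xml change: maven-checkstyle-plugin's maxAllowedViolations is the number of violations the check goal tolerates before failing the build, so lowering it from 3067 to 0 means any future checkstyle warning in storm-client fails the build instead of merely being reported.
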
diff --git a/storm-client/src/jvm/org/apache/storm/Config.java b/storm-client/src/jvm/org/apache/storm/Config.java
index e86e165..2866b03 100644
--- a/storm-client/src/jvm/org/apache/storm/Config.java
+++ b/storm-client/src/jvm/org/apache/storm/Config.java
@@ -36,32 +36,33 @@ import org.apache.storm.validation.ConfigValidation.ListOfListOfStringValidator;
 import org.apache.storm.validation.ConfigValidation.MapOfStringToMapOfStringToObjectValidator;
 import org.apache.storm.validation.ConfigValidation.MetricRegistryValidator;
 import org.apache.storm.validation.ConfigValidation.MetricReportersValidator;
+import org.apache.storm.validation.ConfigValidationAnnotations;
 import org.apache.storm.validation.ConfigValidationAnnotations.CustomValidator;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsBoolean;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsImplementationOfClass;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsInteger;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsKryoReg;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsListEntryCustom;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryCustom;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryType;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsNumber;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsPositiveNumber;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsString;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsStringList;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsStringOrStringList;
+import org.apache.storm.validation.ConfigValidationAnnotations.IsType;
 import org.apache.storm.validation.ConfigValidationAnnotations.NotNull;
 import org.apache.storm.validation.ConfigValidationAnnotations.Password;
-import org.apache.storm.validation.ConfigValidationAnnotations.isBoolean;
-import org.apache.storm.validation.ConfigValidationAnnotations.isImplementationOfClass;
-import org.apache.storm.validation.ConfigValidationAnnotations.isInteger;
-import org.apache.storm.validation.ConfigValidationAnnotations.isKryoReg;
-import org.apache.storm.validation.ConfigValidationAnnotations.isListEntryCustom;
-import org.apache.storm.validation.ConfigValidationAnnotations.isMapEntryCustom;
-import org.apache.storm.validation.ConfigValidationAnnotations.isMapEntryType;
-import org.apache.storm.validation.ConfigValidationAnnotations.isNumber;
-import org.apache.storm.validation.ConfigValidationAnnotations.isPositiveNumber;
-import org.apache.storm.validation.ConfigValidationAnnotations.isString;
-import org.apache.storm.validation.ConfigValidationAnnotations.isStringList;
-import org.apache.storm.validation.ConfigValidationAnnotations.isStringOrStringList;
-import org.apache.storm.validation.ConfigValidationAnnotations.isType;
 
 /**
  * Topology configs are specified as a plain old map. This class provides a convenient way to create a topology config map by providing
  * setter methods for all the configs that can be set. It also makes it easier to do things like add serializations.
  *
- * This class also provides constants for all the configurations possible on a Storm cluster and Storm topology. Each constant is paired
+ * <p>This class also provides constants for all the configurations possible on a Storm cluster and Storm topology. Each constant is paired
  * with an annotation that defines the validity criterion of the corresponding field. Default values for these configs can be found in
  * defaults.yaml.
  *
- * Note that you may put other configurations in any of the configs. Storm will ignore anything it doesn't recognize, but your topologies
+ * <p>Note that you may put other configurations in any of the configs. Storm will ignore anything it doesn't recognize, but your topologies
  * are free to make use of them by reading them in the prepare method of Bolts or the open method of Spouts.
  */
 public class Config extends HashMap<String, Object> {
@@ -69,12 +70,12 @@ public class Config extends HashMap<String, Object> {
     /**
      * The serializer class for ListDelegate (tuple payload). The default serializer will be ListDelegateSerializer
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_TUPLE_SERIALIZER = "topology.tuple.serializer";
     /**
      * Disable load aware grouping support.
      */
-    @isBoolean
+    @IsBoolean
     @NotNull
     public static final String TOPOLOGY_DISABLE_LOADAWARE_MESSAGING = "topology.disable.loadaware.messaging";
     /**
@@ -82,7 +83,7 @@ public class Config extends HashMap<String, Object> {
      * average load is higher than the higher bound, the executor should choose target tasks in a higher scope, The scopes and their orders
      * are: EVERYTHING > RACK_LOCAL > HOST_LOCAL > WORKER_LOCAL
      */
-    @isPositiveNumber
+    @IsPositiveNumber
     @NotNull
     public static final String TOPOLOGY_LOCALITYAWARE_HIGHER_BOUND = "topology.localityaware.higher.bound";
     /**
@@ -90,14 +91,14 @@ public class Config extends HashMap<String, Object> {
      * average load is lower than the lower bound, the executor should choose target tasks in a lower scope. The scopes and their orders
      * are: EVERYTHING > RACK_LOCAL > HOST_LOCAL > WORKER_LOCAL
      */
-    @isPositiveNumber
+    @IsPositiveNumber
     @NotNull
     public static final String TOPOLOGY_LOCALITYAWARE_LOWER_BOUND = "topology.localityaware.lower.bound";
     /**
      * Try to serialize all tuples, even for local transfers.  This should only be used for testing, as a sanity check that all of your
      * tuples are setup properly.
      */
-    @isBoolean
+    @IsBoolean
     public static final String TOPOLOGY_TESTING_ALWAYS_TRY_SERIALIZE = "topology.testing.always.try.serialize";
     /**
      * A map with blobstore keys mapped to each filename the worker will have access to in the launch directory to the blob by local file
@@ -112,14 +113,14 @@ public class Config extends HashMap<String, Object> {
      * How often a worker should check dynamic log level timeouts for expiration. For expired logger settings, the clean up polling task
      * will reset the log levels to the original levels (detected at startup), and will clean up the timeout map
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String WORKER_LOG_LEVEL_RESET_POLL_SECS = "worker.log.level.reset.poll.secs";
     /**
      * How often a task should sync credentials, worst case.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TASK_CREDENTIALS_POLL_SECS = "task.credentials.poll.secs";
     /**
      * Whether to enable backpressure in for a certain topology.
@@ -127,58 +128,58 @@ public class Config extends HashMap<String, Object> {
      * @deprecated: In Storm 2.0. Retained for enabling transition from 1.x. Will be removed soon.
      */
     @Deprecated
-    @isBoolean
+    @IsBoolean
     public static final String TOPOLOGY_BACKPRESSURE_ENABLE = "topology.backpressure.enable";
     /**
      * A list of users that are allowed to interact with the topology.  To use this set nimbus.authorizer to
      * org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_USERS = "topology.users";
     /**
      * A list of groups that are allowed to interact with the topology.  To use this set nimbus.authorizer to
      * org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_GROUPS = "topology.groups";
     /**
      * A list of readonly users that are allowed to interact with the topology.  To use this set nimbus.authorizer to
      * org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_READONLY_USERS = "topology.readonly.users";
     /**
      * A list of readonly groups that are allowed to interact with the topology.  To use this set nimbus.authorizer to
      * org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_READONLY_GROUPS = "topology.readonly.groups";
     /**
      * True if Storm should timeout messages or not. Defaults to true. This is meant to be used in unit tests to prevent tuples from being
      * accidentally timed out during the test.
      */
-    @isBoolean
+    @IsBoolean
     public static final String TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS = "topology.enable.message.timeouts";
     /**
      * When set to true, Storm will log every message that's emitted.
      */
-    @isBoolean
+    @IsBoolean
     public static final String TOPOLOGY_DEBUG = "topology.debug";
     /**
      * User defined version of this topology.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_VERSION = "topology.version";
     /**
      * The fully qualified name of a {@link ShellLogHandler} to handle output from non-JVM processes e.g.
      * "com.mycompany.CustomShellLogHandler". If not provided, org.apache.storm.utils.DefaultLogHandler will be used.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_MULTILANG_LOG_HANDLER = "topology.multilang.log.handler";
     /**
      * The serializer for communication between shell components and non-JVM processes.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_MULTILANG_SERIALIZER = "topology.multilang.serializer";
     /**
      * How many processes should be spawned around the cluster to execute this topology. Each process will execute some number of tasks as
@@ -186,8 +187,8 @@ public class Config extends HashMap<String, Object> {
      * tune the performance of a topology. The number of workers will be dynamically calculated when the Resource Aware scheduler is used,
      * in which case this parameter will not be honored.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_WORKERS = "topology.workers";
     /**
      * How many instances to create for a spout/bolt. A task runs on a thread with zero or more other tasks for the same spout/bolt. The
@@ -195,64 +196,64 @@ public class Config extends HashMap<String, Object> {
      * a spout/bolt can change over time. This allows a topology to scale to more or less resources without redeploying the topology or
      * violating the constraints of Storm (such as a fields grouping guaranteeing that the same value goes to the same task).
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_TASKS = "topology.tasks";
     /**
      * A map of resources used by each component e.g {"cpu.pcore.percent" : 200.0. "onheap.memory.mb": 256.0, "gpu.count" : 2 }
      */
-    @isMapEntryType(keyType = String.class, valueType = Number.class)
+    @IsMapEntryType(keyType = String.class, valueType = Number.class)
     public static final String TOPOLOGY_COMPONENT_RESOURCES_MAP = "topology.component.resources.map";
     /**
      * The maximum amount of memory an instance of a spout/bolt will take on heap. This enables the scheduler to allocate slots on machines
      * with enough available memory. A default value will be set for this config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB = "topology.component.resources.onheap.memory.mb";
     /**
      * The maximum amount of memory an instance of a spout/bolt will take off heap. This enables the scheduler to allocate slots on machines
      * with enough available memory.  A default value will be set for this config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB = "topology.component.resources.offheap.memory.mb";
     /**
      * The config indicates the percentage of cpu for a core an instance(executor) of a component will use. Assuming the a core value to be
      * 100, a value of 10 indicates 10% of the core. The P in PCORE represents the term "physical".  A default value will be set for this
      * config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT = "topology.component.cpu.pcore.percent";
     /**
      * The maximum amount of memory an instance of an acker will take on heap. This enables the scheduler to allocate slots on machines with
      * enough available memory.  A default value will be set for this config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB = "topology.acker.resources.onheap.memory.mb";
     /**
      * The maximum amount of memory an instance of an acker will take off heap. This enables the scheduler to allocate slots on machines
      * with enough available memory.  A default value will be set for this config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_ACKER_RESOURCES_OFFHEAP_MEMORY_MB = "topology.acker.resources.offheap.memory.mb";
     /**
      * The config indicates the percentage of cpu for a core an instance(executor) of an acker will use. Assuming the a core value to be
      * 100, a value of 10 indicates 10% of the core. The P in PCORE represents the term "physical".  A default value will be set for this
      * config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_ACKER_CPU_PCORE_PERCENT = "topology.acker.cpu.pcore.percent";
     /**
      * The maximum amount of memory an instance of a metrics consumer will take on heap. This enables the scheduler to allocate slots on
      * machines with enough available memory.  A default value will be set for this config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_METRICS_CONSUMER_RESOURCES_ONHEAP_MEMORY_MB =
         "topology.metrics.consumer.resources.onheap.memory.mb";
     /**
      * The maximum amount of memory an instance of a metrics consumer will take off heap. This enables the scheduler to allocate slots on
      * machines with enough available memory.  A default value will be set for this config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_METRICS_CONSUMER_RESOURCES_OFFHEAP_MEMORY_MB =
         "topology.metrics.consumer.resources.offheap.memory.mb";
     /**
@@ -260,39 +261,39 @@ public class Config extends HashMap<String, Object> {
      * to be 100, a value of 10 indicates 10% of the core. The P in PCORE represents the term "physical".  A default value will be set for
      * this config if user does not override
      */
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_METRICS_CONSUMER_CPU_PCORE_PERCENT = "topology.metrics.consumer.cpu.pcore.percent";
     /**
      * The class name of the {@link org.apache.storm.state.StateProvider} implementation. If not specified defaults to {@link
      * org.apache.storm.state.InMemoryKeyValueStateProvider}. This can be overridden at the component level.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_STATE_PROVIDER = "topology.state.provider";
     /**
      * The configuration specific to the {@link org.apache.storm.state.StateProvider} implementation. This can be overridden at the
      * component level. The value and the interpretation of this config is based on the state provider implementation. For e.g. this could
      * be just a config file name which contains the config for the state provider implementation.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_STATE_PROVIDER_CONFIG = "topology.state.provider.config";
     /**
      * Topology configuration to specify the checkpoint interval (in millis) at which the topology state is saved when {@link
      * org.apache.storm.topology.IStatefulBolt} bolts are involved.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_STATE_CHECKPOINT_INTERVAL = "topology.state.checkpoint.interval.ms";
     /**
      * A per topology config that specifies the maximum amount of memory a worker can use for that specific topology.
      */
-    @isPositiveNumber
+    @IsPositiveNumber
     public static final String TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB = "topology.worker.max.heap.size.mb";
     /**
      * The strategy to use when scheduling a topology with Resource Aware Scheduler.
      */
     @NotNull
-    @isString
-    //NOTE: @isImplementationOfClass(implementsClass = IStrategy.class) is enforced in DaemonConf, so
+    @IsString
+    //NOTE: @IsImplementationOfClass(implementsClass = IStrategy.class) is enforced in DaemonConf, so
     // an error will be thrown by nimbus on topology submission and not by the client prior to submitting
     // the topology.
     public static final String TOPOLOGY_SCHEDULER_STRATEGY = "topology.scheduler.strategy";
@@ -306,38 +307,38 @@ public class Config extends HashMap<String, Object> {
      * Array of components that scheduler should try to place on separate hosts when using the constraint solver strategy or the
      * multi-tenant scheduler.
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_SPREAD_COMPONENTS = "topology.spread.components";
     /**
      * The maximum number of states that will be searched looking for a solution in the constraint solver strategy.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH = "topology.ras.constraint.max.state.search";
     /**
      * The maximum number of states that will be searched looking for a solution in the constraint solver strategy.
      * Backward compatibility config value for old topologies
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_TRAVERSAL = "topology.ras.constraint.max.state.traversal";
     /**
      * The maximum number of seconds to spend scheduling a topology using the constraint solver.  Null means no limit.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_RAS_CONSTRAINT_MAX_TIME_SECS = "topology.ras.constraint.max.time.secs";
     /**
      * A list of host names that this topology would prefer to be scheduled on (no guarantee is given though). This is intended for
      * debugging only.
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_SCHEDULER_FAVORED_NODES = "topology.scheduler.favored.nodes";
     /**
      * A list of host names that this topology would prefer to NOT be scheduled on (no guarantee is given though). This is intended for
      * debugging only.
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_SCHEDULER_UNFAVORED_NODES = "topology.scheduler.unfavored.nodes";
     /**
      * How many executors to spawn for ackers.
@@ -347,18 +348,18 @@ public class Config extends HashMap<String, Object> {
      * If this variable is set to 0, then Storm will immediately ack tuples as soon as they come off the spout,
      * effectively disabling reliability.</p>
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_ACKER_EXECUTORS = "topology.acker.executors";
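
    For illustration, a sketch using the Config helper that writes this key:

        Config conf = new Config();
        conf.setNumAckers(2);   // two acker executors
        // conf.setNumAckers(0) would ack tuples immediately at the spout, disabling reliability
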
     /**
      * A list of classes implementing IEventLogger (See storm.yaml.example for exact config format). Each listed class will be routed all
      * the events sampled from emitting tuples. If no class is provided for the option, the default event logger will be
      * initialized and used unless you disable the event logger executor.
      *
-     * Note that EventLoggerBolt takes care of all the implementations of IEventLogger, hence registering many implementations (especially
-     * they're implemented as 'blocking' manner) would slow down overall topology.
+     * <p>Note that EventLoggerBolt takes care of all the implementations of IEventLogger, hence registering many
+     * implementations (especially if they're implemented in a 'blocking' manner) would slow down the overall topology.
      */
-    @isListEntryCustom(entryValidatorClasses = { EventLoggerRegistryValidator.class })
+    @IsListEntryCustom(entryValidatorClasses = { EventLoggerRegistryValidator.class })
     public static final String TOPOLOGY_EVENT_LOGGER_REGISTER = "topology.event.logger.register";
     /**
      * How many executors to spawn for event logger.
@@ -367,15 +368,15 @@ public class Config extends HashMap<String, Object> {
      * configured for this topology (or the estimated number of workers if the Resource Aware Scheduler is used).
      * If this variable is set to 0, event logging will be disabled.</p>
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_EVENTLOGGER_EXECUTORS = "topology.eventlogger.executors";
     /**
      * The maximum amount of time given to the topology to fully process a message emitted by a spout. If the message is not acked within
      * this time frame, Storm will fail the message on the spout. Some spout implementations will then replay the message at a later time.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     @NotNull
     public static final String TOPOLOGY_MESSAGE_TIMEOUT_SECS = "topology.message.timeout.secs";
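
    For illustration, via the corresponding Config helper (a sketch):

        Config conf = new Config();
        conf.setMessageTimeoutSecs(60); // fail tuples not fully processed within 60 seconds
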
     /**
@@ -383,23 +384,23 @@ public class Config extends HashMap<String, Object> {
      * for Storm. A serialization can either be the name of a class (in which case Kryo will automatically create a serializer for the class
      * that saves all the object's fields), or an implementation of com.esotericsoftware.kryo.Serializer.
      *
-     * See Kryo's documentation for more information about writing custom serializers.
+     * <p>See Kryo's documentation for more information about writing custom serializers.
      */
-    @isKryoReg
+    @IsKryoReg
     public static final String TOPOLOGY_KRYO_REGISTER = "topology.kryo.register";
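
    A minimal sketch of both registration styles; MyPojo, MyOtherPojo and MyPojoSerializer
    are hypothetical placeholder classes:

        Config conf = new Config();
        conf.registerSerialization(MyPojo.class);           // Kryo generates a field serializer
        conf.registerSerialization(MyOtherPojo.class,
                                   MyPojoSerializer.class); // custom com.esotericsoftware.kryo.Serializer
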
     /**
      * A list of classes that customize storm's kryo instance during start-up. Each listed class name must implement IKryoDecorator. During
      * start-up the listed class is instantiated with 0 arguments, then its 'decorate' method is called with storm's kryo instance as the
      * only argument.
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_KRYO_DECORATORS = "topology.kryo.decorators";
     /**
      * Class that specifies how to create a Kryo instance for serialization. Storm will then apply topology.kryo.register and
      * topology.kryo.decorators on top of this. The default implementation implements topology.fall.back.on.java.serialization and turns
      * references off.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_KRYO_FACTORY = "topology.kryo.factory";
     /**
      * Whether or not Storm should skip the loading of kryo registrations for which it does not know the class or have the serializer
@@ -409,12 +410,12 @@ public class Config extends HashMap<String, Object> {
      * other apps. By setting this config to true, Storm will ignore that it doesn't have those other serializations rather than throw an
      * error.
      */
-    @isBoolean
+    @IsBoolean
     public static final String TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS = "topology.skip.missing.kryo.registrations";
     /**
      * List of classes to register during state serialization.
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_STATE_KRYO_REGISTER = "topology.state.kryo.register";
     /**
      * A list of classes implementing IMetricsConsumer (See storm.yaml.example for exact config format). Each listed class will be routed
@@ -422,198 +423,198 @@ public class Config extends HashMap<String, Object> {
      * its parallelism is configurable.
      */
 
-    @isListEntryCustom(entryValidatorClasses = { MetricRegistryValidator.class })
+    @IsListEntryCustom(entryValidatorClasses = { MetricRegistryValidator.class })
     public static final String TOPOLOGY_METRICS_CONSUMER_REGISTER = "topology.metrics.consumer.register";
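
    For illustration, a sketch registering the built-in LoggingMetricsConsumer with a
    parallelism hint of 2:

        Config conf = new Config();
        conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class, 2);
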
     /**
      * Enable tracking of network message byte counts per source-destination task. This is off by default as it creates tasks^2 metric
      * values, but is useful for debugging as it exposes data skew when tuple sizes are uneven.
      */
-    @isBoolean
+    @IsBoolean
     public static final String TOPOLOGY_SERIALIZED_MESSAGE_SIZE_METRICS = "topology.serialized.message.size.metrics";
     /**
      * A map of metric name to class name implementing IMetric that will be created once per worker JVM.
      */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
+    @IsMapEntryType(keyType = String.class, valueType = String.class)
     public static final String TOPOLOGY_WORKER_METRICS = "topology.worker.metrics";
     /**
      * A map of metric name to class name implementing IMetric that will be created once per worker JVM.
      */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
+    @IsMapEntryType(keyType = String.class, valueType = String.class)
     public static final String WORKER_METRICS = "worker.metrics";
     /**
      * The maximum parallelism allowed for a component in this topology. This configuration is typically used in testing to limit the number
      * of threads spawned in local mode.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_MAX_TASK_PARALLELISM = "topology.max.task.parallelism";
     /**
      * The maximum number of tuples that can be pending on a spout task at any given time. This config applies to individual tasks, not to
      * spouts or topologies as a whole.
      *
-     * A pending tuple is one that has been emitted from a spout but has not been acked or failed yet. Note that this config parameter has
-     * no effect for unreliable spouts that don't tag their tuples with a message id.
+     * <p>A pending tuple is one that has been emitted from a spout but has not been acked or failed yet. Note that this
+     * config parameter has no effect for unreliable spouts that don't tag their tuples with a message id.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_MAX_SPOUT_PENDING = "topology.max.spout.pending";
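
    For illustration, a sketch using the Config helper that writes this key:

        Config conf = new Config();
        conf.setMaxSpoutPending(5000); // at most 5000 un-acked tuples per spout task
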
     /**
      * The number of milliseconds the SleepEmptyEmitStrategy should sleep for.
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS = "topology.sleep.spout.wait.strategy.time.ms";
     /**
      * The maximum amount of time a component gives a source of state to synchronize before it requests synchronization again.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     @NotNull
     public static final String TOPOLOGY_STATE_SYNCHRONIZATION_TIMEOUT_SECS = "topology.state.synchronization.timeout.secs";
     /**
      * The percentage of tuples to sample to produce stats for a task.
      */
-    @isPositiveNumber
+    @IsPositiveNumber
     public static final String TOPOLOGY_STATS_SAMPLE_RATE = "topology.stats.sample.rate";
     /**
      * The time period that built-in metrics data is bucketed into.
      */
-    @isInteger
+    @IsInteger
     public static final String TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS = "topology.builtin.metrics.bucket.size.secs";
     /**
      * Whether or not to use Java serialization in a topology.
      */
-    @isBoolean
+    @IsBoolean
     public static final String TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION = "topology.fall.back.on.java.serialization";
     /**
      * Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.
      */
-    @isStringOrStringList
+    @IsStringOrStringList
     public static final String TOPOLOGY_WORKER_CHILDOPTS = "topology.worker.childopts";
     /**
      * Topology-specific GC options for the worker child process. This overrides WORKER_GC_CHILDOPTS.
      */
-    @isStringOrStringList
+    @IsStringOrStringList
     public static final String TOPOLOGY_WORKER_GC_CHILDOPTS = "topology.worker.gc.childopts";
     /**
      * Topology-specific options for the logwriter process of a worker.
      */
-    @isStringOrStringList
+    @IsStringOrStringList
     public static final String TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS = "topology.worker.logwriter.childopts";
     /**
      * Topology-specific classpath for the worker child process. This is combined with the usual classpath.
      */
-    @isStringOrStringList
+    @IsStringOrStringList
     public static final String TOPOLOGY_CLASSPATH = "topology.classpath";
     /**
      * Topology-specific classpath for the worker child process. This will be *prepended* to the usual classpath, meaning it can override
      * the Storm classpath. This is for debugging purposes, and is disabled by default. To allow topologies to be submitted with user-first
      * classpaths, set the storm.topology.classpath.beginning.enabled config to true.
      */
-    @isStringOrStringList
+    @IsStringOrStringList
     public static final String TOPOLOGY_CLASSPATH_BEGINNING = "topology.classpath.beginning";
     /**
      * Topology-specific environment variables for the worker child process. This is added to the existing environment (that of the
      * supervisor).
      */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
+    @IsMapEntryType(keyType = String.class, valueType = String.class)
     public static final String TOPOLOGY_ENVIRONMENT = "topology.environment";
     /*
      * Bolt-specific configuration for windowed bolts to specify the window length as a count of number of tuples
      * in the window.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_BOLTS_WINDOW_LENGTH_COUNT = "topology.bolts.window.length.count";
     /*
      * Bolt-specific configuration for windowed bolts to specify the window length in time duration.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_BOLTS_WINDOW_LENGTH_DURATION_MS = "topology.bolts.window.length.duration.ms";
     /*
      * Bolt-specific configuration for windowed bolts to specify the sliding interval as a count of number of tuples.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_BOLTS_SLIDING_INTERVAL_COUNT = "topology.bolts.window.sliding.interval.count";
     /*
      * Bolt-specific configuration for windowed bolts to specify the sliding interval in time duration.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_BOLTS_SLIDING_INTERVAL_DURATION_MS = "topology.bolts.window.sliding.interval.duration.ms";
     /**
      * Bolt-specific configuration for windowed bolts to specify the name of the stream on which late tuples are going to be emitted. This
      * configuration should only be used from the BaseWindowedBolt.withLateTupleStream builder method, and not as a global
      * parameter; otherwise an IllegalArgumentException will be thrown.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_BOLTS_LATE_TUPLE_STREAM = "topology.bolts.late.tuple.stream";
     /**
      * Bolt-specific configuration for windowed bolts to specify the maximum time lag of the tuple timestamp in milliseconds. It means that
      * the tuple timestamps cannot be out of order by more than this amount. This config will be effective only if {@link
      * org.apache.storm.windowing.TimestampExtractor} is specified.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_BOLTS_TUPLE_TIMESTAMP_MAX_LAG_MS = "topology.bolts.tuple.timestamp.max.lag.ms";
     /*
      * Bolt-specific configuration for windowed bolts to specify the time interval for generating
      * watermark events. Watermark event tracks the progress of time when tuple timestamp is used.
      * This config is effective only if {@link org.apache.storm.windowing.TimestampExtractor} is specified.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_BOLTS_WATERMARK_EVENT_INTERVAL_MS = "topology.bolts.watermark.event.interval.ms";
     /*
      * Bolt-specific configuration for windowed bolts to specify the name of the field in the tuple that holds
      * the message id. This is used to track the windowing boundaries and avoid re-evaluating the windows
      * during recovery of IStatefulWindowedBolt
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_BOLTS_MESSAGE_ID_FIELD_NAME = "topology.bolts.message.id.field.name";
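
    The window-related keys above are normally set through the BaseWindowedBolt builder
    methods rather than directly; a sketch, where MyWindowedBolt is a hypothetical subclass
    of BaseWindowedBolt:

        import org.apache.storm.topology.TopologyBuilder;
        import org.apache.storm.topology.base.BaseWindowedBolt.Count;
        import org.apache.storm.topology.base.BaseWindowedBolt.Duration;

        TopologyBuilder builder = new TopologyBuilder();
        builder.setBolt("windowed",
                        new MyWindowedBolt()
                            .withWindow(Count.of(100), Count.of(10)) // length 100 tuples, slide 10
                            .withTimestampField("ts")                // use tuple time, not wall clock
                            .withLag(Duration.seconds(5))            // max out-of-orderness of "ts"
                            .withLateTupleStream("late"))            // late tuples routed to this stream
               .shuffleGrouping("input");
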
     /**
      * This config is available for TransactionalSpouts, and contains the id (a String) for the transactional topology. This id is used to
      * store the state of the transactional topology in Zookeeper.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_TRANSACTIONAL_ID = "topology.transactional.id";
     /**
      * A list of task hooks that are automatically added to every spout and bolt in the topology. An example of when you'd do this is to add
      * a hook that integrates with your internal monitoring system. These hooks are instantiated using the zero-arg constructor.
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_AUTO_TASK_HOOKS = "topology.auto.task.hooks";
     /**
      * The size of the receive queue for each executor.
      */
-    @isPositiveNumber
-    @isInteger
+    @IsPositiveNumber
+    @IsInteger
     public static final String TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE = "topology.executor.receive.buffer.size";
     /**
      * The size of the transfer queue for each worker.
      */
-    @isPositiveNumber
-    @isInteger
+    @IsPositiveNumber
+    @IsInteger
     public static final String TOPOLOGY_TRANSFER_BUFFER_SIZE = "topology.transfer.buffer.size";
     /**
      * The number of tuples to batch before sending to the worker transfer queue.
      */
-    @isPositiveNumber
-    @isInteger
+    @IsPositiveNumber
+    @IsInteger
     public static final String TOPOLOGY_TRANSFER_BATCH_SIZE = "topology.transfer.batch.size";
     /**
      * How often a tick tuple from the "__system" component and "__tick" stream should be sent to tasks. Meant to be used as a
      * component-specific configuration.
      */
-    @isInteger
+    @IsInteger
     public static final String TOPOLOGY_TICK_TUPLE_FREQ_SECS = "topology.tick.tuple.freq.secs";
     /**
      * The number of tuples to batch before sending to the destination executor.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     @NotNull
     public static final String TOPOLOGY_PRODUCER_BATCH_SIZE = "topology.producer.batch.size";
     /**
@@ -621,98 +622,98 @@ public class Config extends HashMap<String, Object> {
      * prevents OutOfMemoryException that can occur in rare scenarios in the presence of BackPressure. This affects only inter-worker
      * messages. Messages originating from within the same worker will not be dropped.
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     @NotNull
     public static final String TOPOLOGY_EXECUTOR_OVERFLOW_LIMIT = "topology.executor.overflow.limit";
     /**
      * How often a worker should check and notify upstream workers about its tasks that are no longer experiencing BP and able to receive
-     * new messages
+     * new messages.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     @NotNull
     public static final String TOPOLOGY_BACKPRESSURE_CHECK_MILLIS = "topology.backpressure.check.millis";
     /**
      * How often to send flush tuple to the executors for flushing out batched events.
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     @NotNull
     public static final String TOPOLOGY_BATCH_FLUSH_INTERVAL_MILLIS = "topology.batch.flush.interval.millis";
     /**
      * The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed via the TopologyContext.
      */
-    @isInteger
+    @IsInteger
     public static final String TOPOLOGY_WORKER_SHARED_THREAD_POOL_SIZE = "topology.worker.shared.thread.pool.size";
     /**
      * The interval in seconds to use for determining whether to throttle errors reported to Zookeeper. For example, an interval of 10
      * seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be reported to Zookeeper per task for every
      * 10 second interval of time.
      */
-    @isInteger
+    @IsInteger
     public static final String TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS = "topology.error.throttle.interval.secs";
     /**
-     * See doc for TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS
+     * See doc for {@link #TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS}.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL = "topology.max.error.report.per.interval";
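
    Using the example values from the description above (a sketch):

        Config conf = new Config();
        conf.put(Config.TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS, 10);
        conf.put(Config.TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL, 5); // at most 5 errors per task per 10 s
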
     /**
      * How often a batch can be emitted in a Trident topology.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS = "topology.trident.batch.emit.interval.millis";
     /**
      * Maximum number of tuples that can be stored in the in-memory cache of windowing operators for fast access without fetching
      * them from the store.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_TRIDENT_WINDOWING_INMEMORY_CACHE_LIMIT = "topology.trident.windowing.cache.tuple.limit";
     /**
      * The id assigned to a running topology. The id is the storm name with a unique nonce appended.
      */
-    @isString
+    @IsString
     public static final String STORM_ID = "storm.id";
     /**
      * Name of the topology. This config is automatically set by Storm when the topology is submitted.
      */
-    @isString
-    public final static String TOPOLOGY_NAME = "topology.name";
+    @IsString
+    public static final String TOPOLOGY_NAME = "topology.name";
     /**
-     * The principal who submitted a topology
+     * The principal who submitted a topology.
      */
-    @isString
-    public final static String TOPOLOGY_SUBMITTER_PRINCIPAL = "topology.submitter.principal";
+    @IsString
+    public static final String TOPOLOGY_SUBMITTER_PRINCIPAL = "topology.submitter.principal";
     /**
      * The local user name of the user who submitted a topology.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_SUBMITTER_USER = "topology.submitter.user";
     /**
      * A list of IAutoCredentials that the topology should load and use.
      */
-    @isStringList
+    @IsStringList
     public static final String TOPOLOGY_AUTO_CREDENTIALS = "topology.auto-credentials";
     /**
-     * Max pending tuples in one ShellBolt
+     * Max pending tuples in one ShellBolt.
      */
     @NotNull
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_SHELLBOLT_MAX_PENDING = "topology.shellbolt.max.pending";
     /**
      * How long a subprocess can go without heartbeating before the ShellSpout/ShellBolt tries to suicide itself.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_SUBPROCESS_TIMEOUT_SECS = "topology.subprocess.timeout.secs";
     /**
      * Topology central logging sensitivity to determine who has access to logs in the central logging system. The possible values
      * are: S0 - Public (open to all users on grid), S1 - Restricted, S2 - Confidential, S3 - Secret (the default).
      */
-    @isString(acceptedValues = { "S0", "S1", "S2", "S3" })
+    @IsString(acceptedValues = { "S0", "S1", "S2", "S3" })
     public static final String TOPOLOGY_LOGGING_SENSITIVITY = "topology.logging.sensitivity";
     /**
      * Log file the user can use to configure Log4j2.
@@ -721,186 +722,186 @@ public class Config extends HashMap<String, Object> {
      * The configs are merged according to the rules here:
      *   https://logging.apache.org/log4j/2.x/manual/configuration.html#CompositeConfiguration
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_LOGGING_CONFIG_FILE = "topology.logging.config";
 
     /**
-     * Sets the priority for a topology
+     * Sets the priority for a topology.
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_PRIORITY = "topology.priority";
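
    For illustration, a sketch using the Config helper that writes this key; with the
    Resource Aware Scheduler, lower numbers mean higher priority:

        Config conf = new Config();
        conf.setTopologyPriority(20);
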
     /**
      * The root directory in ZooKeeper for metadata about TransactionalSpouts.
      */
-    @isString
+    @IsString
     public static final String TRANSACTIONAL_ZOOKEEPER_ROOT = "transactional.zookeeper.root";
     /**
      * The list of zookeeper servers in which to keep the transactional state. If null (the default), storm.zookeeper.servers will be used.
      */
-    @isStringList
+    @IsStringList
     public static final String TRANSACTIONAL_ZOOKEEPER_SERVERS = "transactional.zookeeper.servers";
     /**
      * The port to use to connect to the transactional zookeeper servers. If null (the default), storm.zookeeper.port will be used.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TRANSACTIONAL_ZOOKEEPER_PORT = "transactional.zookeeper.port";
     /**
      * The user as which the nimbus client should be acquired to perform the operation.
      */
-    @isString
+    @IsString
     public static final String STORM_DO_AS_USER = "storm.doAsUser";
     /**
      * The number of machines that should be used by this topology to isolate it from all others. To use this, set storm.scheduler to
      * org.apache.storm.scheduler.multitenant.MultitenantScheduler.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TOPOLOGY_ISOLATED_MACHINES = "topology.isolate.machines";
     /**
      * A class that implements a wait strategy for spout. Waiting is triggered in one of two conditions:
      *
-     * 1. nextTuple emits no tuples 2. The spout has hit maxSpoutPending and can't emit any more tuples
+     * <p>1. nextTuple emits no tuples 2. The spout has hit maxSpoutPending and can't emit any more tuples
      *
-     * This class must implement {@link IWaitStrategy}.
+     * <p>This class must implement {@link IWaitStrategy}.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_SPOUT_WAIT_STRATEGY = "topology.spout.wait.strategy";
     /**
      * Configures park time for WaitStrategyPark for spout.  If set to 0, returns immediately (i.e. busy wait).
      */
     @NotNull
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_SPOUT_WAIT_PARK_MICROSEC = "topology.spout.wait.park.microsec";
     /**
-     * Configures number of iterations to spend in level 1 of WaitStrategyProgressive, before progressing to level 2
+     * Configures number of iterations to spend in level 1 of WaitStrategyProgressive, before progressing to level 2.
      */
     @NotNull
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL1_COUNT = "topology.spout.wait.progressive.level1.count";
     /**
-     * Configures number of iterations to spend in level 2 of WaitStrategyProgressive, before progressing to level 3
+     * Configures number of iterations to spend in level 2 of WaitStrategyProgressive, before progressing to level 3.
      */
     @NotNull
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL2_COUNT = "topology.spout.wait.progressive.level2.count";
     /**
      * Configures sleep time for WaitStrategyProgressive.
      */
     @NotNull
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS = "topology.spout.wait.progressive.level3.sleep.millis";
     /**
      * Selects the Bolt's Wait Strategy to use when there are no incoming msgs. Used to trade off latency vs CPU usage. This class must
      * implement {@link IWaitStrategy}.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_BOLT_WAIT_STRATEGY = "topology.bolt.wait.strategy";
     /**
      * Configures park time for WaitStrategyPark.  If set to 0, returns immediately (i.e. busy wait).
      */
     @NotNull
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_BOLT_WAIT_PARK_MICROSEC = "topology.bolt.wait.park.microsec";
     /**
-     * Configures number of iterations to spend in level 1 of WaitStrategyProgressive, before progressing to level 2
+     * Configures number of iterations to spend in level 1 of WaitStrategyProgressive, before progressing to level 2.
      */
     @NotNull
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL1_COUNT = "topology.bolt.wait.progressive.level1.count";
     /**
-     * Configures number of iterations to spend in level 2 of WaitStrategyProgressive, before progressing to level 3
+     * Configures number of iterations to spend in level 2 of WaitStrategyProgressive, before progressing to level 3.
      */
     @NotNull
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL2_COUNT = "topology.bolt.wait.progressive.level2.count";
     /**
      * Configures sleep time for WaitStrategyProgressive.
      */
     @NotNull
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS = "topology.bolt.wait.progressive.level3.sleep.millis";
     /**
      * A class that implements a wait strategy for an upstream component (spout/bolt) trying to write to a downstream component
      * whose recv queue is full.
      *
-     * 1. nextTuple emits no tuples 2. The spout has hit maxSpoutPending and can't emit any more tuples
+     * <p>1. nextTuple emits no tuples 2. The spout has hit maxSpoutPending and can't emit any more tuples
      *
-     * This class must implement {@link IWaitStrategy}.
+     * <p>This class must implement {@link IWaitStrategy}.
      */
-    @isString
+    @IsString
     public static final String TOPOLOGY_BACKPRESSURE_WAIT_STRATEGY = "topology.backpressure.wait.strategy";
     /**
      * Configures park time if using WaitStrategyPark for BackPressure. If set to 0, returns immediately (i.e. busy wait).
      */
     @NotNull
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_BACKPRESSURE_WAIT_PARK_MICROSEC = "topology.backpressure.wait.park.microsec";
     /**
      * Configures sleep time if using WaitStrategyProgressive for BackPressure.
      */
     @NotNull
-    @isPositiveNumber(includeZero = true)
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS =
         "topology.backpressure.wait.progressive.level3.sleep.millis";
     /**
      * Configures steps used to determine progression to the next level of wait, if using WaitStrategyProgressive for BackPressure.
      */
     @NotNull
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL1_COUNT = "topology.backpressure.wait.progressive.level1.count";
     /**
      * Configures steps used to determine progression to the next level of wait, if using WaitStrategyProgressive for BackPressure.
      */
     @NotNull
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL2_COUNT = "topology.backpressure.wait.progressive.level2.count";
     /**
      * Check recvQ after every N invocations of Spout's nextTuple() [when ACKing is disabled]. Spouts receive very few msgs if ACK is
      * disabled. This avoids checking the recvQ after each nextTuple().
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     @NotNull
     public static final String TOPOLOGY_SPOUT_RECVQ_SKIPS = "topology.spout.recvq.skips";
     /**
      * Minimum number of nimbus hosts where the code must be replicated before leader nimbus is allowed to perform topology activation tasks
      * like setting up heartbeats/assignments and marking the topology as active. The default is 0.
      */
-    @isNumber
+    @IsNumber
     public static final String TOPOLOGY_MIN_REPLICATION_COUNT = "topology.min.replication.count";
     /**
      * Maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time is elapsed nimbus will
      * go ahead and perform topology activation tasks even if required nimbus.min.replication.count is not achieved. The default is 0
      * seconds; a value of -1 indicates to wait forever.
      */
-    @isNumber
+    @IsNumber
     public static final String TOPOLOGY_MAX_REPLICATION_WAIT_TIME_SEC = "topology.max.replication.wait.time.sec";
     /**
      * The list of servers that Pacemaker is running on.
      */
-    @isStringList
+    @IsStringList
     public static final String PACEMAKER_SERVERS = "pacemaker.servers";
     /**
      * The port Pacemaker should run on. Clients should connect to this port to submit or read heartbeats.
      */
-    @isNumber
-    @isPositiveNumber
+    @IsNumber
+    @IsPositiveNumber
     public static final String PACEMAKER_PORT = "pacemaker.port";
     /**
      * The maximum number of threads that should be used by the Pacemaker client.
      * When Pacemaker gets loaded it will spawn new threads, up to
      * this many total, to handle the load.
      */
-    @isNumber
-    @isPositiveNumber
+    @IsNumber
+    @IsPositiveNumber
     public static final String PACEMAKER_CLIENT_MAX_THREADS = "pacemaker.client.max.threads";
     /**
      * This should be one of "DIGEST", "KERBEROS", or "NONE". Determines the mode of authentication the pacemaker server and client use. The
@@ -913,189 +914,188 @@ public class Config extends HashMap<String, Object> {
     /**
      * Pacemaker Thrift Max Message Size (bytes).
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String PACEMAKER_THRIFT_MESSAGE_SIZE_MAX = "pacemaker.thrift.message.size.max";
     /**
      * Max number of seconds the group mapping service will cache user groups.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_GROUP_MAPPING_SERVICE_CACHE_DURATION_SECS = "storm.group.mapping.service.cache.duration.secs";
     /**
      * List of DRPC servers so that the DRPCSpout knows who to talk to.
      */
-    @isStringList
+    @IsStringList
     public static final String DRPC_SERVERS = "drpc.servers";
     /**
      * This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results back.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String DRPC_INVOCATIONS_PORT = "drpc.invocations.port";
     /**
      * The number of times to retry a Nimbus operation.
      */
-    @isNumber
+    @IsNumber
     public static final String STORM_NIMBUS_RETRY_TIMES = "storm.nimbus.retry.times";
     /**
      * The starting interval between exponential backoff retries of a Nimbus operation.
      */
-    @isNumber
+    @IsNumber
     public static final String STORM_NIMBUS_RETRY_INTERVAL = "storm.nimbus.retry.interval.millis";
     /**
      * The ceiling of the interval between retries when a client connects to Nimbus.
      */
-    @isNumber
+    @IsNumber
     public static final String STORM_NIMBUS_RETRY_INTERVAL_CEILING = "storm.nimbus.retry.intervalceiling.millis";
     /**
-     * The Nimbus transport plug-in for Thrift client/server communication
+     * The Nimbus transport plug-in for Thrift client/server communication.
      */
-    @isString
+    @IsString
     public static final String NIMBUS_THRIFT_TRANSPORT_PLUGIN = "nimbus.thrift.transport";
     /**
      * Which port the Thrift interface of Nimbus should run on. Clients should connect to this port to upload jars and submit topologies.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String NIMBUS_THRIFT_PORT = "nimbus.thrift.port";
     /**
      * Nimbus thrift server queue size; the default is 100000. This is the request queue size: when there are more requests than
      * threads available to serve them, requests are queued. If the queue grows beyond this config, incoming requests will be
      * rejected.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String NIMBUS_QUEUE_SIZE = "nimbus.queue.size";
     /**
      * Nimbus assignments backend for storing local assignments. We will use it to store physical plan and runtime storm ids.
      */
-    @isString
-    @isImplementationOfClass(implementsClass = org.apache.storm.assignments.ILocalAssignmentsBackend.class)
+    @IsString
+    @ConfigValidationAnnotations.IsImplementationOfClass(implementsClass = org.apache.storm.assignments.ILocalAssignmentsBackend.class)
     public static final String NIMBUS_LOCAL_ASSIGNMENTS_BACKEND_CLASS = "nimbus.local.assignments.backend.class";
     /**
      * The number of threads that should be used by the nimbus thrift server.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String NIMBUS_THRIFT_THREADS = "nimbus.thrift.threads";
     /**
      * The maximum buffer size thrift should use when reading messages.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String NIMBUS_THRIFT_MAX_BUFFER_SIZE = "nimbus.thrift.max_buffer_size";
     /**
      * How long a Thrift client socket can hang before it times out and the socket is restarted.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_THRIFT_SOCKET_TIMEOUT_MS = "storm.thrift.socket.timeout.ms";
     /**
-     * The DRPC transport plug-in for Thrift client/server communication
+     * The DRPC transport plug-in for Thrift client/server communication.
      */
-    @isString
+    @IsString
     public static final String DRPC_THRIFT_TRANSPORT_PLUGIN = "drpc.thrift.transport";
     /**
      * This port is used by Storm DRPC for receiving DRPC requests from clients.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String DRPC_PORT = "drpc.port";
     /**
-     * DRPC thrift server queue size
+     * DRPC thrift server queue size.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String DRPC_QUEUE_SIZE = "drpc.queue.size";
     /**
-     * DRPC thrift server worker threads
+     * DRPC thrift server worker threads.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String DRPC_WORKER_THREADS = "drpc.worker.threads";
     /**
      * The maximum buffer size thrift should use when reading messages for DRPC.
      */
-    @isNumber
-    @isPositiveNumber
+    @IsNumber
+    @IsPositiveNumber
     public static final String DRPC_MAX_BUFFER_SIZE = "drpc.max_buffer_size";
     /**
-     * The DRPC invocations transport plug-in for Thrift client/server communication
+     * The DRPC invocations transport plug-in for Thrift client/server communication.
      */
-    @isString
+    @IsString
     public static final String DRPC_INVOCATIONS_THRIFT_TRANSPORT_PLUGIN = "drpc.invocations.thrift.transport";
     /**
-     * DRPC invocations thrift server worker threads
+     * DRPC invocations thrift server worker threads.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String DRPC_INVOCATIONS_THREADS = "drpc.invocations.threads";
     /**
      * Initialization parameters for the group mapping service plugin. Provides a way for a
-     * @link{STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN}
-     * implementation to access optional settings.
+     * {@link #STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN} implementation to access optional settings.
      */
-    @isType(type = Map.class)
+    @IsType(type = Map.class)
     public static final String STORM_GROUP_MAPPING_SERVICE_PARAMS = "storm.group.mapping.service.params";
     /**
-     * The default transport plug-in for Thrift client/server communication
+     * The default transport plug-in for Thrift client/server communication.
      */
-    @isString
+    @IsString
     public static final String STORM_THRIFT_TRANSPORT_PLUGIN = "storm.thrift.transport";
     /**
      * How long a worker can go without heartbeating before the supervisor tries to restart the worker process.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     @NotNull
     public static final String SUPERVISOR_WORKER_TIMEOUT_SECS = "supervisor.worker.timeout.secs";
     /**
      * How many seconds to allow for graceful worker shutdown when killing workers before resorting to force kill.
      * If a worker fails to shut down gracefully within this delay, it will either suicide or be forcibly killed by the supervisor.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String SUPERVISOR_WORKER_SHUTDOWN_SLEEP_SECS = "supervisor.worker.shutdown.sleep.secs";
     /**
      * A list of hosts of ZooKeeper servers used to manage the cluster.
      */
-    @isStringList
+    @IsStringList
     public static final String STORM_ZOOKEEPER_SERVERS = "storm.zookeeper.servers";
     /**
      * The port Storm will use to connect to each of the ZooKeeper servers.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String STORM_ZOOKEEPER_PORT = "storm.zookeeper.port";
     /**
      * This is part of a temporary workaround to a ZK bug, it is the 'scheme:acl' for the user Nimbus and Supervisors use to authenticate
      * with ZK.
      */
-    @isString
+    @IsString
     public static final String STORM_ZOOKEEPER_SUPERACL = "storm.zookeeper.superACL";
     /**
      * The ACL of the drpc user in zookeeper so the drpc servers can verify worker tokens.
      *
-     * Should be in the form 'scheme:acl' just like STORM_ZOOKEEPER_SUPERACL.
+     * <p>Should be in the form 'scheme:acl' just like STORM_ZOOKEEPER_SUPERACL.
      */
-    @isString
+    @IsString
     public static final String STORM_ZOOKEEPER_DRPC_ACL = "storm.zookeeper.drpcACL";
     /**
      * The topology Zookeeper authentication scheme to use, e.g. "digest". This is an internal config and users shouldn't set it.
      */
-    @isString
+    @IsString
     public static final String STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME = "storm.zookeeper.topology.auth.scheme";
     /**
      * The delegate for serializing metadata, should be used for serialized objects stored in zookeeper and on disk. This is NOT used for
      * compressing serialized tuples sent between topologies.
      */
-    @isString
+    @IsString
     public static final String STORM_META_SERIALIZATION_DELEGATE = "storm.meta.serialization.delegate";
-    @isListEntryCustom(entryValidatorClasses = { MetricReportersValidator.class })
+    @IsListEntryCustom(entryValidatorClasses = { MetricReportersValidator.class })
     public static final String STORM_METRICS_REPORTERS = "storm.metrics.reporters";
     /**
      * What blobstore implementation the storm client should use.
      */
-    @isString
+    @IsString
     public static final String CLIENT_BLOBSTORE = "client.blobstore.class";
 
     /**
@@ -1103,81 +1103,82 @@ public class Config extends HashMap<String, Object> {
      * LocalFsBlobStore it could be either absolute or relative. If the setting is a relative directory, it is relative to the root
      * directory of the Storm installation.
      */
-    @isString
+    @IsString
     public static final String BLOBSTORE_DIR = "blobstore.dir";
     /**
      * Enable the blobstore cleaner. Certain blobstores may only want to run the cleaner on one daemon. Currently Nimbus handles setting
      * this.
      */
-    @isBoolean
+    @IsBoolean
     public static final String BLOBSTORE_CLEANUP_ENABLE = "blobstore.cleanup.enable";
     /**
      * principal for nimbus/supervisor to use to access secure hdfs for the blobstore.
      * The format is generally "primary/instance@REALM", where the "instance" field is optional.
      * If the instance field of the principal is the string "_HOST", it will
-     + be replaced with the host name of the server the daemon is running on (by calling {@link #getBlobstoreHDFSPrincipal(Map conf)} method).
+     * be replaced with the host name of the server the daemon is running on (by calling
+     * {@link #getBlobstoreHDFSPrincipal(Map conf)} method).
      */
-    @isString
+    @IsString
     public static final String BLOBSTORE_HDFS_PRINCIPAL = "blobstore.hdfs.principal";
     /**
      * keytab for nimbus/supervisor to use to access secure hdfs for the blobstore.
      */
-    @isString
+    @IsString
     public static final String BLOBSTORE_HDFS_KEYTAB = "blobstore.hdfs.keytab";
     /**
-     * Set replication factor for a blob in HDFS Blobstore Implementation
+     * Set replication factor for a blob in HDFS Blobstore Implementation.
      */
-    @isPositiveNumber
-    @isInteger
+    @IsPositiveNumber
+    @IsInteger
     public static final String STORM_BLOBSTORE_REPLICATION_FACTOR = "storm.blobstore.replication.factor";
     /**
      * The hostname the supervisors/workers should report to nimbus. If unset, Storm will get the hostname to report by calling
      * <code>InetAddress.getLocalHost().getCanonicalHostName()</code>.
      *
-     * You should set this config when you don't have a DNS which supervisors/workers can utilize to find each other based on hostname got
-     * from calls to
+     * <p>You should set this config when you don't have a DNS which supervisors/workers can utilize to find each other
+     * based on the hostname obtained from calls to
      * <code>InetAddress.getLocalHost().getCanonicalHostName()</code>.
      */
-    @isString
+    @IsString
     public static final String STORM_LOCAL_HOSTNAME = "storm.local.hostname";
     /**
      * List of seed nimbus hosts to use for leader nimbus discovery.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_SEEDS = "nimbus.seeds";
     /**
      * A list of users that are the only ones allowed to run user operations on the storm cluster. To use this, set nimbus.authorizer to
      * org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_USERS = "nimbus.users";
     /**
      * A list of groups; users belonging to these groups are the only ones allowed to run user operations on the storm cluster. To use
      * this, set nimbus.authorizer to org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_GROUPS = "nimbus.groups";
     /**
      * The mode this Storm cluster is running in. Either "distributed" or "local".
      */
-    @isString
+    @IsString
     public static final String STORM_CLUSTER_MODE = "storm.cluster.mode";
     /**
      * The root location at which Storm stores data in ZooKeeper.
      */
-    @isString
+    @IsString
     public static final String STORM_ZOOKEEPER_ROOT = "storm.zookeeper.root";
     /**
      * A string representing the payload for topology Zookeeper authentication. It gets serialized using UTF-8 encoding during
      * authentication.
      */
-    @isString
+    @IsString
     @Password
     public static final String STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD = "storm.zookeeper.topology.auth.payload";
     /**
      * The cluster Zookeeper authentication scheme to use, e.g. "digest". Defaults to no authentication.
      */
-    @isString
+    @IsString
     public static final String STORM_ZOOKEEPER_AUTH_SCHEME = "storm.zookeeper.auth.scheme";
     /**
      * A string representing the payload for cluster Zookeeper authentication. It gets serialized using UTF-8 encoding during
@@ -1185,7 +1186,7 @@ public class Config extends HashMap<String, Object> {
      * in the storm-cluster-auth.yaml file. This file storm-cluster-auth.yaml should then be protected with appropriate permissions that
      * deny access from workers.
      */
-    @isString
+    @IsString
     public static final String STORM_ZOOKEEPER_AUTH_PAYLOAD = "storm.zookeeper.auth.payload";
     /**
      * What Network Topography detection classes should we use. Given a list of supervisor hostnames (or IP addresses), this class would
@@ -1193,101 +1194,101 @@ public class Config extends HashMap<String, Object> {
      * resource aware scheduler.
      */
     @NotNull
-    @isImplementationOfClass(implementsClass = org.apache.storm.networktopography.DNSToSwitchMapping.class)
+    @IsImplementationOfClass(implementsClass = org.apache.storm.networktopography.DNSToSwitchMapping.class)
     public static final String STORM_NETWORK_TOPOGRAPHY_PLUGIN = "storm.network.topography.plugin";
     /**
      * The jvm opts provided to workers launched by this supervisor for GC. All "%ID%" substrings are replaced with an identifier for this
      * worker.  Because the JVM complains about multiple GC opts the topology can override this default value by setting
      * topology.worker.gc.childopts.
      */
-    @isStringOrStringList
+    @IsStringOrStringList
     public static final String WORKER_GC_CHILDOPTS = "worker.gc.childopts";
     /**
      * The jvm opts provided to workers launched by this supervisor. All "%ID%", "%WORKER-ID%", "%TOPOLOGY-ID%", "%WORKER-PORT%" and
      * "%HEAP-MEM%" substrings are replaced with: %ID%          -> port (for backward compatibility), %WORKER-ID%   -> worker-id,
      * %TOPOLOGY-ID%    -> topology-id, %WORKER-PORT% -> port. %HEAP-MEM% -> mem-onheap.
      */
-    @isStringOrStringList
+    @IsStringOrStringList
     public static final String WORKER_CHILDOPTS = "worker.childopts";
     /**
-     * The default heap memory size in MB per worker, used in the jvm -Xmx opts for launching the worker
+     * The default heap memory size in MB per worker, used in the jvm -Xmx opts for launching the worker.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String WORKER_HEAP_MEMORY_MB = "worker.heap.memory.mb";
     /**
      * The total amount of memory (in MiB) a supervisor is allowed to give to its workers. A default value will be set for this config
      * if the user does not override it.
      */
-    @isPositiveNumber
+    @IsPositiveNumber
     public static final String SUPERVISOR_MEMORY_CAPACITY_MB = "supervisor.memory.capacity.mb";
     /**
      * The total amount of CPU resources a supervisor is allowed to give to its workers. By convention 1 cpu core should be about 100,
      * but this can be adjusted if needed. Using 100 makes it simple to set the desired value to the capacity measurement for single
      * threaded bolts. A default value will be set for this config if the user does not override it.
      */
-    @isPositiveNumber
+    @IsPositiveNumber
     public static final String SUPERVISOR_CPU_CAPACITY = "supervisor.cpu.capacity";
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     /**
      * Port used for supervisor thrift server.
      */
     public static final String SUPERVISOR_THRIFT_PORT = "supervisor.thrift.port";
-    @isString
+    @IsString
     /**
      * The Supervisor invocations transport plug-in for Thrift client/server communication.
      */
     public static final String SUPERVISOR_THRIFT_TRANSPORT_PLUGIN = "supervisor.thrift.transport";
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     /**
      * Supervisor thrift server queue size.
      */
     public static final String SUPERVISOR_QUEUE_SIZE = "supervisor.queue.size";
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     /**
      * The number of threads that should be used by the supervisor thrift server.
      */
     public static final String SUPERVISOR_THRIFT_THREADS = "supervisor.thrift.threads";
-    @isNumber
-    @isPositiveNumber
+    @IsNumber
+    @IsPositiveNumber
     public static final String SUPERVISOR_THRIFT_MAX_BUFFER_SIZE = "supervisor.thrift.max_buffer_size";
     /**
      * How long a supervisor Thrift client socket can hang before it times out and the socket is restarted.
      */
-    @isInteger
+    @IsInteger
     public static final String SUPERVISOR_THRIFT_SOCKET_TIMEOUT_MS = "supervisor.thrift.socket.timeout.ms";
     /**
      * A map of resources the Supervisor has, e.g. {"cpu.pcore.percent" : 200.0, "onheap.memory.mb": 256.0, "gpu.count" : 2.0 }.
      */
-    @isMapEntryType(keyType = String.class, valueType = Number.class)
+    @IsMapEntryType(keyType = String.class, valueType = Number.class)
     public static final String SUPERVISOR_RESOURCES_MAP = "supervisor.resources.map";
     /**
      * Whether or not to use ZeroMQ for messaging in local mode. If this is set to false, then Storm will use a pure-Java messaging system.
      * The purpose of this flag is to make it easy to run Storm in local mode by eliminating the need for native dependencies, which can be
      * difficult to install.
      *
-     * Defaults to false.
+     * <p>Defaults to false.
      */
-    @isBoolean
+    @IsBoolean
     public static final String STORM_LOCAL_MODE_ZMQ = "storm.local.mode.zmq";
     /**
-     * The transporter for communication among Storm tasks
+     * The transporter for communication among Storm tasks.
      */
-    @isString
+    @IsString
     public static final String STORM_MESSAGING_TRANSPORT = "storm.messaging.transport";
     /**
      * Netty based messaging: Is authentication required for Netty messaging from client worker process to server worker process.
      */
-    @isBoolean
+    @IsBoolean
     public static final String STORM_MESSAGING_NETTY_AUTHENTICATION = "storm.messaging.netty.authentication";
     /**
-     * Netty based messaging: The buffer size for send/recv buffer
+     * Netty based messaging: The buffer size for send/recv buffer.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String STORM_MESSAGING_NETTY_BUFFER_SIZE = "storm.messaging.netty.buffer_size";
     /**
      * Netty based messaging: The netty write buffer high watermark in bytes.
@@ -1297,8 +1298,8 @@ public class Config extends HashMap<String, Object> {
      * low water mark}.
      * </p>
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String STORM_MESSAGING_NETTY_BUFFER_HIGH_WATERMARK = "storm.messaging.netty.buffer.high.watermark";
     /**
      * Netty based messaging: The netty write buffer low watermark in bytes.
@@ -1307,299 +1308,301 @@ public class Config extends HashMap<String, Object> {
      * mark} and then dropped down below this value, the netty {@code Channel.isWritable()} will start to return true.
      * </p>
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String STORM_MESSAGING_NETTY_BUFFER_LOW_WATERMARK = "storm.messaging.netty.buffer.low.watermark";
     /**
-     * Netty based messaging: Sets the backlog value to specify when the channel binds to a local address
+     * Netty based messaging: Sets the backlog value to specify when the channel binds to a local address.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String STORM_MESSAGING_NETTY_SOCKET_BACKLOG = "storm.messaging.netty.socket.backlog";
     /**
      * Netty based messaging: The # of worker threads for the server.
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String STORM_MESSAGING_NETTY_SERVER_WORKER_THREADS = "storm.messaging.netty.server_worker_threads";
     /**
      * If the Netty messaging layer is busy, the Netty client will try to batch messages as much as possible, up to the size of
-     * STORM_NETTY_MESSAGE_BATCH_SIZE bytes
+     * STORM_NETTY_MESSAGE_BATCH_SIZE bytes.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_NETTY_MESSAGE_BATCH_SIZE = "storm.messaging.netty.transfer.batch.size";
     /**
      * Netty based messaging: The min # of milliseconds that a peer will wait.
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String STORM_MESSAGING_NETTY_MIN_SLEEP_MS = "storm.messaging.netty.min_wait_ms";
     /**
      * Netty based messaging: The max # of milliseconds that a peer will wait.
      */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
+    @IsInteger
+    @IsPositiveNumber(includeZero = true)
     public static final String STORM_MESSAGING_NETTY_MAX_SLEEP_MS = "storm.messaging.netty.max_wait_ms";
     /**
      * Netty based messaging: The # of worker threads for the client.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_MESSAGING_NETTY_CLIENT_WORKER_THREADS = "storm.messaging.netty.client_worker_threads";
     /**
      * Should the supervisor try to run the worker as the launching user or not.  Defaults to false.
      */
-    @isBoolean
+    @IsBoolean
     public static final String SUPERVISOR_RUN_WORKER_AS_USER = "supervisor.run.worker.as.user";
     /**
-     * max timeout for supervisor reported heartbeats when master gains leadership
+     * Max timeout for supervisor-reported heartbeats when the master gains leadership.
      */
-    @isInteger
+    @IsInteger
     public static final String SUPERVISOR_WORKER_HEARTBEATS_MAX_TIMEOUT_SECS = "supervisor.worker.heartbeats.max.timeout.secs";
     /**
      * On some systems (Windows, for example) symlinks require special privileges that not everyone wants to grant a headless user.  You can
      * completely disable the use of symlinks by setting this config to true, but by doing so you may also lose some features from storm.
      * For example the blobstore feature does not currently work without symlinks enabled.
      */
-    @isBoolean
+    @IsBoolean
     public static final String DISABLE_SYMLINKS = "storm.disable.symlinks";
     /**
      * The plugin that will convert a principal to a local user.
      */
-    @isString
+    @IsString
     public static final String STORM_PRINCIPAL_TO_LOCAL_PLUGIN = "storm.principal.tolocal";
     /**
-     * The plugin that will provide user groups service
+     * The plugin that will provide the user groups service.
      */
-    @isString
+    @IsString
     public static final String STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN = "storm.group.mapping.service";
     /**
      * A list of credential renewers that nimbus should load.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_CREDENTIAL_RENEWERS = "nimbus.credential.renewers.classes";
     /**
      * A list of plugins that nimbus should load during topology submission to populate credentials on the user's behalf.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_AUTO_CRED_PLUGINS = "nimbus.autocredential.plugins.classes";
 
     /**
      * A list of users that run the supervisors and should be authorized to interact with nimbus as a supervisor would.  To use this set
      * nimbus.authorizer to org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_SUPERVISOR_USERS = "nimbus.supervisor.users";
     /**
      * A list of users that nimbus runs as and should be authorized to interact with the supervisor as nimbus would. To use this set
      * supervisor.authorizer to org.apache.storm.security.auth.authorizer.SupervisorSimpleACLAuthorizer.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_DAEMON_USERS = "nimbus.daemon.users";
     /**
      * A list of users that are cluster admins and can run any command.  To use this set nimbus.authorizer to
      * org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_ADMINS = "nimbus.admins";
     /**
      * A list of groups that are cluster admins and can run any command.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_ADMINS_GROUPS = "nimbus.admins.groups";
     /**
-     * For secure mode we would want to turn on this config By default this is turned off assuming the default is insecure
+     * For secure mode we would want to turn on this config. By default this is turned off, assuming the default is insecure.
      */
-    @isBoolean
+    @IsBoolean
     public static final String STORM_BLOBSTORE_ACL_VALIDATION_ENABLED = "storm.blobstore.acl.validation.enabled";
     /**
      * What buffer size to use for the blobstore uploads.
      */
-    @isPositiveNumber
-    @isInteger
+    @IsPositiveNumber
+    @IsInteger
     public static final String STORM_BLOBSTORE_INPUTSTREAM_BUFFER_SIZE_BYTES = "storm.blobstore.inputstream.buffer.size.bytes";
     /**
      * What chunk size to use for the storm client to upload dependency jars.
      */
-    @isPositiveNumber
-    @isInteger
-    public static final String STORM_BLOBSTORE_DEPENDENCY_JAR_UPLOAD_CHUCK_SIZE_BYTES = "storm.blobstore.dependency.jar.upload.chuck.size.bytes";
+    @IsPositiveNumber
+    @IsInteger
+    public static final String STORM_BLOBSTORE_DEPENDENCY_JAR_UPLOAD_CHUCK_SIZE_BYTES =
+            "storm.blobstore.dependency.jar.upload.chuck.size.bytes";
     /**
      * FQCN of a class that implements {@code ISubmitterHook} @see ISubmitterHook for details.
      */
-    @isString
+    @IsString
     public static final String STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN = "storm.topology.submission.notifier.plugin.class";
     /**
      * Impersonation user ACL config entries.
      */
-    @isMapEntryCustom(keyValidatorClasses = { ConfigValidation.StringValidator.class },
+    @IsMapEntryCustom(keyValidatorClasses = { ConfigValidation.StringValidator.class },
         valueValidatorClasses = { ConfigValidation.ImpersonationAclUserEntryValidator.class })
     public static final String NIMBUS_IMPERSONATION_ACL = "nimbus.impersonation.acl";
     /**
      * A whitelist of the RAS scheduler strategies allowed by nimbus. Should be a list of fully-qualified class names or null to allow all.
      */
-    @isStringList
+    @IsStringList
     public static final String NIMBUS_SCHEDULER_STRATEGY_CLASS_WHITELIST = "nimbus.scheduler.strategy.class.whitelist";
     /**
      * Full path to the worker-launcher executable that will be used to launch workers when SUPERVISOR_RUN_WORKER_AS_USER is set to true.
      */
-    @isString
+    @IsString
     public static final String SUPERVISOR_WORKER_LAUNCHER = "supervisor.worker.launcher";
     /**
      * Map a version of storm to a worker classpath that can be used to run it. This allows the supervisor to select an available version of
      * storm that is compatible with what a topology was launched with.
      *
-     * Only the major and minor version numbers are used, although this may change in the future.  The code will first try to find a version
+     * <p>Only the major and minor version numbers are used, although this may change in the future.  The code will
+     * first try to find a version
      * that is the same or higher than the requested version, but with the same major version number.  If it cannot, it will fall back to
      * using one with a lower minor version, but in some cases this might fail as some features may be missing.
      *
-     * Because of how this selection process works please don't include two releases with the same major and minor versions as it is
+     * <p>Because of how this selection process works, please don't include two releases with the same major and minor versions, as it is
      * undefined which will be selected.  Also it is good practice to just include one release for each major version you want to support
      * unless the minor versions are truly not compatible with each other. This is to avoid maintenance and testing overhead.
      *
-     * This config needs to be set on all supervisors and on nimbus.  In general this can be the output of calling storm classpath on the
+     * <p>This config needs to be set on all supervisors and on nimbus.  In general this can be the output of calling storm classpath on the
      * version you want and adding in an entry for the config directory for that release.  You should modify the storm.yaml of each of these
      * versions to match the features and settings you want on the main version.
      */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
+    @IsMapEntryType(keyType = String.class, valueType = String.class)
     public static final String SUPERVISOR_WORKER_VERSION_CLASSPATH_MAP = "supervisor.worker.version.classpath.map";
     /**
      * Map a version of storm to a worker's main class.  In most cases storm should have correct defaults and just setting
      * SUPERVISOR_WORKER_VERSION_CLASSPATH_MAP is enough.
      */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
+    @IsMapEntryType(keyType = String.class, valueType = String.class)
     public static final String SUPERVISOR_WORKER_VERSION_MAIN_MAP = "supervisor.worker.version.main.map";
     /**
      * Map a version of storm to a worker's logwriter class. In most cases storm should have correct defaults and just setting
      * SUPERVISOR_WORKER_VERSION_CLASSPATH_MAP is enough.
      */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
+    @IsMapEntryType(keyType = String.class, valueType = String.class)
     public static final String SUPERVISOR_WORKER_VERSION_LOGWRITER_MAP = "supervisor.worker.version.logwriter.map";
     /**
      * The version of storm to assume a topology should run as if no version is given by the client when submitting the topology.
      */
-    @isString
+    @IsString
     public static final String SUPERVISOR_WORKER_DEFAULT_VERSION = "supervisor.worker.default.version";
     /**
      * A directory on the local filesystem used by Storm for any local filesystem usage it needs. The directory must exist and the Storm
      * daemons must have permission to read/write from this location. It could be either absolute or relative. If the setting is a relative
      * directory, it is relative to root directory of Storm installation.
      */
-    @isString
+    @IsString
     public static final String STORM_LOCAL_DIR = "storm.local.dir";
     /**
      * The workers-artifacts directory (where we place all workers' logs) can be either absolute or relative. By default,
      * ${storm.log.dir}/workers-artifacts is where worker logs go. If the setting is a relative directory, it is relative to storm.log.dir.
      */
-    @isString
+    @IsString
     public static final String STORM_WORKERS_ARTIFACTS_DIR = "storm.workers.artifacts.dir";
     /**
      * A list of hosts of Exhibitor servers used to discover/maintain connection to ZooKeeper cluster. Any configured ZooKeeper servers will
      * be used for the curator/exhibitor backup connection string.
      */
-    @isStringList
+    @IsStringList
     public static final String STORM_EXHIBITOR_SERVERS = "storm.exhibitor.servers";
     /**
      * The port Storm will use to connect to each of the exhibitor servers.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String STORM_EXHIBITOR_PORT = "storm.exhibitor.port";
     /*
      * The URI path to use when polling the Exhibitor cluster.
      */
-    @isString
+    @IsString
     public static final String STORM_EXHIBITOR_URIPATH = "storm.exhibitor.poll.uripath";
     /**
      * How often to poll Exhibitor cluster in millis.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_EXHIBITOR_POLL = "storm.exhibitor.poll.millis";
     /**
      * The number of times to retry an Exhibitor operation.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_EXHIBITOR_RETRY_TIMES = "storm.exhibitor.retry.times";
     /*
      * The interval between retries of an Exhibitor operation.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_EXHIBITOR_RETRY_INTERVAL = "storm.exhibitor.retry.interval";
     /**
      * The ceiling of the interval between retries of an Exhibitor operation.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_EXHIBITOR_RETRY_INTERVAL_CEILING = "storm.exhibitor.retry.intervalceiling.millis";
     /**
      * The connection timeout for clients to ZooKeeper.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_ZOOKEEPER_CONNECTION_TIMEOUT = "storm.zookeeper.connection.timeout";
     /**
      * The session timeout for clients to ZooKeeper.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_ZOOKEEPER_SESSION_TIMEOUT = "storm.zookeeper.session.timeout";
     /**
      * The interval between retries of a Zookeeper operation.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_ZOOKEEPER_RETRY_INTERVAL = "storm.zookeeper.retry.interval";
     /**
      * The ceiling of the interval between retries of a Zookeeper operation.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING = "storm.zookeeper.retry.intervalceiling.millis";
     /**
      * The number of times to retry a Zookeeper operation.
      */
-    @isInteger
+    @IsInteger
     public static final String STORM_ZOOKEEPER_RETRY_TIMES = "storm.zookeeper.retry.times";
     /**
      * The ClusterState factory that worker will use to create a ClusterState to store state in. Defaults to ZooKeeper.
      */
-    @isString
+    @IsString
     public static final String STORM_CLUSTER_STATE_STORE = "storm.cluster.state.store";
     /**
      * How often this worker should heartbeat to the supervisor.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String WORKER_HEARTBEAT_FREQUENCY_SECS = "worker.heartbeat.frequency.secs";
     /**
      * How often executor metrics should report to master, used for RPC heartbeat mode.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String EXECUTOR_METRICS_FREQUENCY_SECS = "executor.metrics.frequency.secs";
     /**
      * How often a task should heartbeat its status to the master. Deprecated in favor of the 2.0 RPC heartbeat reporting; see {@code
      * EXECUTOR_METRICS_FREQUENCY_SECS }.
      */
     @Deprecated
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TASK_HEARTBEAT_FREQUENCY_SECS = "task.heartbeat.frequency.secs";
     /**
      * How often a task should sync its connections with other tasks (if a task is reassigned, the other tasks sending messages to it need
      * to refresh their connections). In general though, when a reassignment happens other tasks will be notified almost immediately. This
      * configuration is here just in case that notification doesn't come through.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String TASK_REFRESH_POLL_SECS = "task.refresh.poll.secs";
     /**
      * The Access Control List for the DRPC Authorizer.
      *
      * @see org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer
      */
-    @isType(type = Map.class)
+    @IsType(type = Map.class)
     public static final String DRPC_AUTHORIZER_ACL = "drpc.authorizer.acl";
     /**
      * File name of the DRPC Authorizer ACL.
      *
      * @see org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer
      */
-    @isString
+    @IsString
     public static final String DRPC_AUTHORIZER_ACL_FILENAME = "drpc.authorizer.acl.filename";
     /**
      * Whether the DRPCSimpleAclAuthorizer should deny requests for operations involving functions that have no explicit ACL entry. When set
@@ -1609,39 +1612,39 @@ public class Config extends HashMap<String, Object> {
      *
      * @see org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer
      */
-    @isBoolean
+    @IsBoolean
     public static final String DRPC_AUTHORIZER_ACL_STRICT = "drpc.authorizer.acl.strict";
     /**
-     * root directory of the storm cgroup hierarchy
+     * Root directory of the storm cgroup hierarchy.
      */
-    @isString
+    @IsString
     public static final String STORM_CGROUP_HIERARCHY_DIR = "storm.cgroup.hierarchy.dir";
     /**
-     * The number of Buckets
+     * The number of stat buckets.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String NUM_STAT_BUCKETS = "num.stat.buckets";
     /**
      * Interval at which the worker checks for updated blobs and refreshes worker state accordingly. The default is 10 seconds.
      */
-    @isInteger
-    @isPositiveNumber
+    @IsInteger
+    @IsPositiveNumber
     public static final String WORKER_BLOB_UPDATE_POLL_INTERVAL_SECS = "worker.blob.update.poll.interval.secs";
     /**
      * Specifies a Locale for the daemon metrics reporter plugin. Use an IETF BCP 47 language tag string for the Locale.
      */
-    @isString
+    @IsString
     public static final String STORM_DAEMON_METRICS_REPORTER_PLUGIN_LOCALE = "storm.daemon.metrics.reporter.plugin.locale";
     /**
      * Specifies a rate-unit in TimeUnit for the reporting frequency of the daemon metrics reporter plugin.
      */
-    @isString
+    @IsString
     public static final String STORM_DAEMON_METRICS_REPORTER_PLUGIN_RATE_UNIT = "storm.daemon.metrics.reporter.plugin.rate.unit";
     /**
      * Specifies a duration-unit in TimeUnit for the reporting window of the daemon metrics reporter plugin.
      */
-    @isString
+    @IsString
     public static final String STORM_DAEMON_METRICS_REPORTER_PLUGIN_DURATION_UNIT = "storm.daemon.metrics.reporter.plugin.duration.unit";
     //DO NOT CHANGE UNLESS WE ADD IN STATE NOT STORED IN THE PARENT CLASS
     private static final long serialVersionUID = -1550278723792864455L;
@@ -1778,38 +1781,47 @@ public class Config extends HashMap<String, Object> {
         return ret;
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setClasspath(String cp) {
         setClasspath(this, cp);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setEnvironment(Map<String, Object> env) {
         setEnvironment(this, env);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setDebug(boolean isOn) {
         setDebug(this, isOn);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setTopologyVersion(String version) {
         setTopologyVersion(this, version);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setNumWorkers(int workers) {
         setNumWorkers(this, workers);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setNumAckers(int numExecutors) {
         setNumAckers(this, numExecutors);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setNumEventLoggers(int numExecutors) {
         setNumEventLoggers(this, numExecutors);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setMessageTimeoutSecs(int secs) {
         setMessageTimeoutSecs(this, secs);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void registerSerialization(Class klass) {
         registerSerialization(this, klass);
     }
@@ -1818,6 +1830,7 @@ public class Config extends HashMap<String, Object> {
         registerSerialization(this, klass, serializerClass);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void registerEventLogger(Class<? extends IEventLogger> klass, Map<String, Object> argument) {
         registerEventLogger(this, klass, argument);
     }
@@ -1826,6 +1839,7 @@ public class Config extends HashMap<String, Object> {
         registerEventLogger(this, klass, null);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void registerMetricsConsumer(Class klass, Object argument, long parallelismHint) {
         registerMetricsConsumer(this, klass, argument, parallelismHint);
     }
@@ -1838,30 +1852,37 @@ public class Config extends HashMap<String, Object> {
         registerMetricsConsumer(this, klass);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void registerDecorator(Class<? extends IKryoDecorator> klass) {
         registerDecorator(this, klass);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setKryoFactory(Class<? extends IKryoFactory> klass) {
         setKryoFactory(this, klass);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setSkipMissingKryoRegistrations(boolean skip) {
         setSkipMissingKryoRegistrations(this, skip);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setMaxTaskParallelism(int max) {
         setMaxTaskParallelism(this, max);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setMaxSpoutPending(int max) {
         setMaxSpoutPending(this, max);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setStatsSampleRate(double rate) {
         setStatsSampleRate(this, rate);
     }
 
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public void setFallBackOnJavaSerialization(boolean fallback) {
         setFallBackOnJavaSerialization(this, fallback);
     }
@@ -1890,7 +1911,7 @@ public class Config extends HashMap<String, Object> {
         if (component1 != null && component2 != null) {
             List<String> constraintPair = Arrays.asList(component1, component2);
             List<List<String>> constraints = (List<List<String>>) computeIfAbsent(Config.TOPOLOGY_RAS_CONSTRAINTS,
-                                                                                  (k) -> new ArrayList<>(1));
+                (k) -> new ArrayList<>(1));
             constraints.add(constraintPair);
         }
     }
@@ -1906,8 +1927,6 @@ public class Config extends HashMap<String, Object> {
 
     /**
      * Set the priority for a topology.
-     *
-     * @param priority
      */
     public void setTopologyPriority(int priority) {
         this.put(Config.TOPOLOGY_PRIORITY, priority);
@@ -1919,6 +1938,7 @@ public class Config extends HashMap<String, Object> {
 
     private static final String HOSTNAME_PATTERN = "_HOST";
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public static String getBlobstoreHDFSPrincipal(Map conf) throws UnknownHostException {
         String principal = (String) conf.get(Config.BLOBSTORE_HDFS_PRINCIPAL);
         if (principal != null) {
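
The fluent setters above all delegate to static helpers on Config, and since Config extends HashMap the raw string keys work interchangeably with them. A minimal sketch of assembling a topology config this way (the worker count and buffer size are illustrative values, not recommendations):

    import java.util.Map;
    import org.apache.storm.Config;

    public class ConfigSketch {
        public static Map<String, Object> buildConf() {
            Config conf = new Config();
            conf.setNumWorkers(2);           // how many worker processes to request
            conf.setDebug(false);            // disable per-tuple debug logging
            conf.setMessageTimeoutSecs(30);  // tuple tree must complete within 30s
            // Raw keys work too, because Config is just a HashMap<String, Object>:
            conf.put(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE, 5 * 1024 * 1024);
            return conf;
        }
    }
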
diff --git a/storm-client/src/jvm/org/apache/storm/ILocalCluster.java b/storm-client/src/jvm/org/apache/storm/ILocalCluster.java
index 1c420de..3c223eb 100644
--- a/storm-client/src/jvm/org/apache/storm/ILocalCluster.java
+++ b/storm-client/src/jvm/org/apache/storm/ILocalCluster.java
@@ -28,26 +28,24 @@ import org.apache.storm.thrift.TException;
  */
 public interface ILocalCluster extends AutoCloseable {
     /**
-     * Submit a topology to be run in local mode
+     * Submit a topology to be run in local mode.
      *
      * @param topologyName the name of the topology to use
      * @param conf         the config for the topology
      * @param topology     the topology itself.
      * @return an AutoCloseable that will kill the topology.
-     *
      * @throws TException on any error from nimbus
      */
     ILocalTopology submitTopology(String topologyName, Map<String, Object> conf, StormTopology topology) throws TException;
 
     /**
-     * Submit a topology to be run in local mode
+     * Submit a topology to be run in local mode.
      *
      * @param topologyName the name of the topology to use
      * @param conf         the config for the topology
      * @param topology     the topology itself.
      * @param submitOpts   options for topology
      * @return an AutoCloseable that will kill the topology.
-     *
      * @throws TException on any error from nimbus
      */
     ILocalTopology submitTopologyWithOpts(String topologyName, Map<String, Object> conf, StormTopology topology,
@@ -63,7 +61,7 @@ public interface ILocalCluster extends AutoCloseable {
     void uploadNewCredentials(String topologyName, Credentials creds) throws TException;
 
     /**
-     * Kill a topology (if you are not using ILocalTopology)
+     * Kill a topology (if you are not using ILocalTopology).
      *
      * @param topologyName the name of the topology
      * @throws TException on any error from nimbus
@@ -71,7 +69,7 @@ public interface ILocalCluster extends AutoCloseable {
     void killTopology(String topologyName) throws TException;
 
     /**
-     * Kill a topology (if you are not using ILocalTopology)
+     * Kill a topology (if you are not using ILocalTopology).
      *
      * @param topologyName the name of the topology
      * @param options      for how to kill the topology
@@ -80,7 +78,7 @@ public interface ILocalCluster extends AutoCloseable {
     void killTopologyWithOpts(String name, KillOptions options) throws TException;
 
     /**
-     * Activate a topology
+     * Activate a topology.
      *
      * @param topologyName the name of the topology to activate
      * @throws TException on any error from nimbus
@@ -88,7 +86,7 @@ public interface ILocalCluster extends AutoCloseable {
     void activate(String topologyName) throws TException;
 
     /**
-     * Deactivate a topology
+     * Deactivate a topology.
      *
      * @param topologyName the name of the topology to deactivate
      * @throws TException on any error from nimbus
@@ -96,7 +94,7 @@ public interface ILocalCluster extends AutoCloseable {
     void deactivate(String topologyName) throws TException;
 
     /**
-     * Rebalance a topology
+     * Rebalance a topology.
      *
      * @param name    the name of the topology
      * @param options options for rebalancing the topology.
@@ -113,38 +111,35 @@ public interface ILocalCluster extends AutoCloseable {
     void shutdown();
 
     /**
-     * The config of a topology as a JSON string
+     * The config of a topology as a JSON string.
      *
      * @param id the id of the topology (not the name)
      * @return The config of a topology as a JSON string
-     *
      * @throws TException on any error from nimbus
      */
     String getTopologyConf(String id) throws TException;
 
     /**
-     * Get the compiled storm topology
+     * Get the compiled storm topology.
      *
      * @param id the id of the topology (not the name)
      * @return the compiled storm topology
-     *
      * @throws TException on any error from nimbus
      */
     StormTopology getTopology(String id) throws TException;
 
     /**
+     * Get cluster information.
      * @return a summary of the current state of the cluster
-     *
      * @throws TException on any error from nimbus
      */
     ClusterSummary getClusterInfo() throws TException;
 
     /**
-     * Get the state of a topology
+     * Get the state of a topology.
      *
      * @param id the id of the topology (not the name)
      * @return the state of a topology
-     *
      * @throws TException on any error from nimbus
      */
     TopologyInfo getTopologyInfo(String id) throws TException;
@@ -160,7 +155,6 @@ public interface ILocalCluster extends AutoCloseable {
      * Advance the cluster time when the cluster is using SimulatedTime. This is intended for internal testing only.
      *
      * @param secs the number of seconds to advance time
-     * @throws InterruptedException
      */
     void advanceClusterTime(int secs) throws InterruptedException;
 
@@ -169,7 +163,6 @@ public interface ILocalCluster extends AutoCloseable {
      *
      * @param secs  the number of seconds to advance time
      * @param steps the number of steps we should take when advancing simulated time
-     * @throws InterruptedException
      */
     void advanceClusterTime(int secs, int step) throws InterruptedException;
 
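
Since the cluster is AutoCloseable and submitTopology returns an ILocalTopology handle that kills the topology when closed, a test can scope everything with try-with-resources. A minimal sketch, assuming the LocalCluster implementation from storm-server and that ILocalTopology is the nested handle type on this interface:

    import org.apache.storm.Config;
    import org.apache.storm.ILocalCluster;
    import org.apache.storm.LocalCluster;
    import org.apache.storm.generated.StormTopology;

    class LocalClusterSketch {
        // Topology construction elided; build one with TopologyBuilder in practice.
        static void runBriefly(StormTopology topology) throws Exception {
            try (ILocalCluster cluster = new LocalCluster()) {
                // The returned handle kills the topology when closed.
                try (ILocalCluster.ILocalTopology topo =
                         cluster.submitTopology("demo", new Config(), topology)) {
                    Thread.sleep(10_000); // let it run briefly
                }
            }
        }
    }
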
diff --git a/storm-client/src/jvm/org/apache/storm/ILocalDRPC.java b/storm-client/src/jvm/org/apache/storm/ILocalDRPC.java
index 1a8916d..037239b 100644
--- a/storm-client/src/jvm/org/apache/storm/ILocalDRPC.java
+++ b/storm-client/src/jvm/org/apache/storm/ILocalDRPC.java
@@ -16,17 +16,18 @@ import org.apache.storm.daemon.Shutdownable;
 import org.apache.storm.generated.DistributedRPC;
 import org.apache.storm.generated.DistributedRPCInvocations;
 
-
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public interface ILocalDRPC extends DistributedRPC.Iface, DistributedRPCInvocations.Iface, Shutdownable, AutoCloseable {
     /**
      * Get the ID of the service.  This is used internally if multiple local DRPC clusters are in use at one time.
      */
-    public String getServiceId();
+    String getServiceId();
 
     /**
+     * Shutdown.
      * @deprecated use {@link #close()} instead
      */
     @Deprecated
     @Override
-    public void shutdown();
+    void shutdown();
 }
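
The getServiceId() contract above exists so DRPC spouts can locate the right in-process server when several are running. A minimal sketch of driving an ILocalDRPC handle (how the handle is obtained is elided, since the concrete implementation lives in storm-server, and the "reach" function name is illustrative):

    import org.apache.storm.ILocalDRPC;

    class DrpcQuerySketch {
        // "reach" stands in for whatever function the deployed DRPC topology registered.
        static String query(ILocalDRPC drpc) throws Exception {
            try {
                return drpc.execute("reach", "https://example.com");
            } finally {
                drpc.close(); // preferred over the deprecated shutdown()
            }
        }
    }
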
diff --git a/storm-client/src/jvm/org/apache/storm/StormSubmitter.java b/storm-client/src/jvm/org/apache/storm/StormSubmitter.java
index 837aecd..ed97e13 100644
--- a/storm-client/src/jvm/org/apache/storm/StormSubmitter.java
+++ b/storm-client/src/jvm/org/apache/storm/StormSubmitter.java
@@ -42,11 +42,11 @@ import org.apache.storm.security.auth.ClientAuthUtils;
 import org.apache.storm.security.auth.IAutoCredentials;
 import org.apache.storm.shade.org.apache.commons.lang.StringUtils;
 import org.apache.storm.shade.org.json.simple.JSONValue;
+import org.apache.storm.thrift.TException;
 import org.apache.storm.utils.BufferFileInputStream;
 import org.apache.storm.utils.NimbusClient;
 import org.apache.storm.utils.Utils;
 import org.apache.storm.validation.ConfigValidation;
-import org.apache.storm.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -63,6 +63,7 @@ public class StormSubmitter {
         return Utils.secureRandomLong() + ":" + Utils.secureRandomLong();
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public static boolean validateZKDigestPayload(String payload) {
         if (payload != null) {
             Matcher m = zkDigestPattern.matcher(payload);
@@ -75,10 +76,9 @@ public class StormSubmitter {
         Map<String, Object> toRet = new HashMap<>();
         String secretPayload = (String) conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD);
         // Is the topology ZooKeeper authentication configuration unset?
-        if (!conf.containsKey(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) ||
-            conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) == null ||
-            !validateZKDigestPayload((String)
-                                         conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD))) {
+        if (!conf.containsKey(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD)
+                || conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) == null
+                || !validateZKDigestPayload((String)conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD))) {
             secretPayload = generateZookeeperDigestSecretPayload();
             LOG.info("Generated ZooKeeper secret payload for MD5-digest: " + secretPayload);
         }
@@ -187,17 +187,29 @@ public class StormSubmitter {
     }
 
     /**
+     * Submits a topology to run on the cluster. A topology runs forever or until explicitly killed.
+     *
+     * @param name             the name of the storm.
+     * @param topoConf         the topology-specific configuration. See {@link Config}.
+     * @param topology         the processing to execute.
+     * @param opts             to manipulate the starting of the topology
+     * @param progressListener to track the progress of the jar upload process
+     * @throws AlreadyAliveException    if a topology with this name is already running
+     * @throws InvalidTopologyException if an invalid topology was submitted
+     * @throws AuthorizationException   if authorization fails
+     * @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
+     */
+    @SuppressWarnings("unchecked")
+    public static void submitTopology(String name, Map<String, Object> topoConf, StormTopology topology, SubmitOptions opts,
+            ProgressListener progressListener) throws AlreadyAliveException, InvalidTopologyException,
+            AuthorizationException {
+        submitTopologyAs(name, topoConf, topology, opts, progressListener, null);
+    }
+
+    /**
      * Submits a topology to run on the cluster as a particular user. A topology runs forever or until explicitly killed.
      *
-     * @param name
-     * @param topoConf
-     * @param topology
-     * @param opts
-     * @param progressListener
-     * @param asUser           The user as which this topology should be submitted.
-     * @throws AlreadyAliveException
-     * @throws InvalidTopologyException
-     * @throws AuthorizationException
+     * @param asUser The user as which this topology should be submitted.
      * @throws IllegalArgumentException thrown if the configs would yield an unschedulable topology
      * @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
      */
@@ -337,10 +349,7 @@ public class StormSubmitter {
     }
 
     /**
-     * @param name
-     * @param asUser
-     * @param topoConf
-     * @param topology
+     * Invoke submitter hook.
+     * @throws SubmitterHookException This is thrown when any Exception occurs during initialization or invocation of registered {@link
      *     ISubmitterHook}
      */
@@ -368,26 +377,6 @@ public class StormSubmitter {
     }
 
     /**
-     * Submits a topology to run on the cluster. A topology runs forever or until explicitly killed.
-     *
-     * @param name             the name of the storm.
-     * @param topoConf         the topology-specific configuration. See {@link Config}.
-     * @param topology         the processing to execute.
-     * @param opts             to manipulate the starting of the topology
-     * @param progressListener to track the progress of the jar upload process
-     * @throws AlreadyAliveException    if a topology with this name is already running
-     * @throws InvalidTopologyException if an invalid topology was submitted
-     * @throws AuthorizationException   if authorization is failed
-     * @thorws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
-     */
-    @SuppressWarnings("unchecked")
-    public static void submitTopology(String name, Map<String, Object> topoConf, StormTopology topology, SubmitOptions opts,
-                                      ProgressListener progressListener) throws AlreadyAliveException, InvalidTopologyException,
-        AuthorizationException {
-        submitTopologyAs(name, topoConf, topology, opts, progressListener, null);
-    }
-
-    /**
      * Submits a topology to run on the cluster with a progress bar. A topology runs forever or until explicitly killed.
      *
      * @param name     the name of the storm.
@@ -455,7 +444,7 @@ public class StormSubmitter {
     }
 
     /**
-     * Submit jar file
+     * Submit jar file.
      *
      * @param conf     the topology-specific configuration. See {@link Config}.
      * @param localJar file path of the jar file to submit
@@ -465,6 +454,18 @@ public class StormSubmitter {
         return submitJar(conf, localJar, null);
     }
 
+    /**
+     * Submit jar file.
+     *
+     * @param conf     the topology-specific configuration. See {@link Config}.
+     * @param localJar file path of the jar file to submit
+     * @param listener progress listener to track the jar file upload
+     * @return the remote location of the submitted jar
+     */
+    public static String submitJar(Map<String, Object> conf, String localJar, ProgressListener listener) {
+        return submitJarAs(conf, localJar, listener, (String) null);
+    }
+
     public static String submitJarAs(Map<String, Object> conf, String localJar, ProgressListener listener, NimbusClient client) {
         if (localJar == null) {
             throw new RuntimeException(
@@ -518,18 +519,6 @@ public class StormSubmitter {
         }
     }
 
-    /**
-     * Submit jar file
-     *
-     * @param conf     the topology-specific configuration. See {@link Config}.
-     * @param localJar file path of the jar file to submit
-     * @param listener progress listener to track the jar file upload
-     * @return the remote location of the submitted jar
-     */
-    public static String submitJar(Map<String, Object> conf, String localJar, ProgressListener listener) {
-        return submitJarAs(conf, localJar, listener, (String) null);
-    }
-
     private static void validateConfs(Map<String, Object> topoConf, StormTopology topology) throws IllegalArgumentException,
         InvalidTopologyException, AuthorizationException {
         ConfigValidation.validateTopoConf(topoConf);
@@ -537,11 +526,11 @@ public class StormSubmitter {
     }
 
     /**
-     * Interface use to track progress of file upload
+     * Interface used to track the progress of a file upload.
      */
     public interface ProgressListener {
         /**
-         * called before file is uploaded
+         * Called before the file is uploaded.
          *
          * @param srcFile    - jar file to be uploaded
          * @param targetFile - destination file
@@ -550,7 +539,7 @@ public class StormSubmitter {
         public void onStart(String srcFile, String targetFile, long totalBytes);
 
         /**
-         * called whenever a chunk of bytes is uploaded
+         * Called whenever a chunk of bytes is uploaded.
          *
          * @param srcFile       - jar file to be uploaded
          * @param targetFile    - destination file
@@ -560,7 +549,7 @@ public class StormSubmitter {
         public void onProgress(String srcFile, String targetFile, long bytesUploaded, long totalBytes);
 
         /**
-         * called when the file is uploaded
+         * Called when the file has been uploaded.
          *
          * @param srcFile    - jar file to be uploaded
          * @param targetFile - destination file
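
With the overload moved up, submitTopology(name, conf, topology, opts, listener) is the most general entry point, and the listener receives the three callbacks documented above. A minimal sketch of a logging listener; the name of the final callback (onCompleted) is assumed here, since the excerpt cuts off before its declaration:

    import org.apache.storm.Config;
    import org.apache.storm.StormSubmitter;
    import org.apache.storm.generated.StormTopology;

    class SubmitWithProgressSketch {
        static void submit(String name, Config conf, StormTopology topology) throws Exception {
            StormSubmitter.ProgressListener listener = new StormSubmitter.ProgressListener() {
                @Override
                public void onStart(String srcFile, String targetFile, long totalBytes) {
                    System.out.printf("uploading %s -> %s (%d bytes)%n", srcFile, targetFile, totalBytes);
                }

                @Override
                public void onProgress(String srcFile, String targetFile, long bytesUploaded, long totalBytes) {
                    System.out.printf("  %d / %d bytes%n", bytesUploaded, totalBytes);
                }

                @Override // method name assumed; the excerpt above truncates before it
                public void onCompleted(String srcFile, String targetFile, long totalBytes) {
                    System.out.println("upload complete");
                }
            };
            StormSubmitter.submitTopology(name, conf, topology, null, listener); // null opts = defaults
        }
    }
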
diff --git a/storm-client/src/jvm/org/apache/storm/StormTimer.java b/storm-client/src/jvm/org/apache/storm/StormTimer.java
index bcd39d3..a68384b 100644
--- a/storm-client/src/jvm/org/apache/storm/StormTimer.java
+++ b/storm-client/src/jvm/org/apache/storm/StormTimer.java
@@ -65,6 +65,10 @@ public class StormTimer implements AutoCloseable {
         scheduleMs(Time.secsToMillisLong(delaySecs), func, checkActive, jitterMs);
     }
 
+    public void schedule(int delaySecs, Runnable func) {
+        schedule(delaySecs, func, true, 0);
+    }
+
     /**
      * Same as schedule with millisecond resolution.
      *
@@ -88,10 +92,6 @@ public class StormTimer implements AutoCloseable {
         task.add(new QueueEntry(endTimeMs, func, id));
     }
 
-    public void schedule(int delaySecs, Runnable func) {
-        schedule(delaySecs, func, true, 0);
-    }
-
     public void scheduleMs(long delayMs, Runnable func) {
         scheduleMs(delayMs, func, true, 0);
     }
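
Both one-shot overloads now sit next to their full-signature counterparts and simply pass checkActive=true with zero jitter. A minimal sketch of using them; the two-argument StormTimer constructor (a name plus an UncaughtExceptionHandler) is assumed from the surrounding sources:

    import org.apache.storm.StormTimer;

    class TimerSketch {
        static void demo() throws Exception {
            // Constructor signature is assumed; adjust if it differs in your version.
            StormTimer timer = new StormTimer("example-timer",
                (thread, error) -> System.err.println("timer thread died: " + error));
            timer.schedule(30, () -> System.out.println("ran once after 30 s"));
            timer.scheduleMs(500, () -> System.out.println("ran once after 500 ms"));
            Thread.sleep(31_000);  // wait for the scheduled work before tearing down
            timer.close();         // StormTimer is AutoCloseable
        }
    }
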
diff --git a/storm-client/src/jvm/org/apache/storm/Thrift.java b/storm-client/src/jvm/org/apache/storm/Thrift.java
index 1745cd1..acba4ae 100644
--- a/storm-client/src/jvm/org/apache/storm/Thrift.java
+++ b/storm-client/src/jvm/org/apache/storm/Thrift.java
@@ -218,8 +218,8 @@ public class Thrift {
     }
 
     public static SpoutSpec prepareSerializedSpoutDetails(IRichSpout spout, Map<String, StreamInfo> outputs) {
-        return new SpoutSpec(ComponentObject.serialized_java
-            (Utils.javaSerialize(spout)), prepareComponentCommon(new HashMap<>(), outputs, null, null));
+        return new SpoutSpec(ComponentObject.serialized_java(Utils.javaSerialize(spout)),
+                prepareComponentCommon(new HashMap<>(), outputs, null, null));
     }
 
     public static Bolt prepareSerializedBoltDetails(Map<GlobalStreamId, Grouping> inputs, IBolt bolt, Map<String, StreamInfo> outputs,
@@ -256,17 +256,17 @@ public class Thrift {
         return details;
     }
 
-    public static StormTopology buildTopology(HashMap<String, SpoutDetails> spoutMap,
-                                              HashMap<String, BoltDetails> boltMap, HashMap<String, StateSpoutSpec> stateMap) {
-        return buildTopology(spoutMap, boltMap);
-    }
-
     private static void addInputs(BoltDeclarer declarer, Map<GlobalStreamId, Grouping> inputs) {
         for (Entry<GlobalStreamId, Grouping> entry : inputs.entrySet()) {
             declarer.grouping(entry.getKey(), entry.getValue());
         }
     }
 
+    public static StormTopology buildTopology(HashMap<String, SpoutDetails> spoutMap,
+                                              HashMap<String, BoltDetails> boltMap, HashMap<String, StateSpoutSpec> stateMap) {
+        return buildTopology(spoutMap, boltMap);
+    }
+
     public static StormTopology buildTopology(Map<String, SpoutDetails> spoutMap, Map<String, BoltDetails> boltMap) {
         TopologyBuilder builder = new TopologyBuilder();
         for (Entry<String, SpoutDetails> entry : spoutMap.entrySet()) {
diff --git a/storm-client/src/jvm/org/apache/storm/annotation/InterfaceStability.java b/storm-client/src/jvm/org/apache/storm/annotation/InterfaceStability.java
index 1cd0de9..0a57ac1 100644
--- a/storm-client/src/jvm/org/apache/storm/annotation/InterfaceStability.java
+++ b/storm-client/src/jvm/org/apache/storm/annotation/InterfaceStability.java
@@ -18,7 +18,6 @@ import java.lang.annotation.RetentionPolicy;
 
 /**
  * Annotation to inform users of how much to rely on a particular package, class or method not changing over time.
- * </ul>
  */
 @InterfaceStability.Evolving
 public class InterfaceStability {
diff --git a/storm-client/src/jvm/org/apache/storm/assignments/InMemoryAssignmentBackend.java b/storm-client/src/jvm/org/apache/storm/assignments/InMemoryAssignmentBackend.java
index 03a400c..8854e38 100644
--- a/storm-client/src/jvm/org/apache/storm/assignments/InMemoryAssignmentBackend.java
+++ b/storm-client/src/jvm/org/apache/storm/assignments/InMemoryAssignmentBackend.java
@@ -24,6 +24,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * An assignment backend which will keep all assignments and id-info in memory. Only used internally if no backend is specified.
+ *
  * <p>About thread safe: idToAssignment,idToName,nameToId are all memory cache in nimbus local, for
  * <ul>
  * <li>idToAssignment: nimbus will modify it and supervisors will sync it at fixed interval,
diff --git a/storm-client/src/jvm/org/apache/storm/blobstore/BlobStore.java b/storm-client/src/jvm/org/apache/storm/blobstore/BlobStore.java
index 6cf9df9..c9219c5 100644
--- a/storm-client/src/jvm/org/apache/storm/blobstore/BlobStore.java
+++ b/storm-client/src/jvm/org/apache/storm/blobstore/BlobStore.java
@@ -37,16 +37,17 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Provides a way to store blobs that can be downloaded. Blobs must be able to be uploaded and listed from Nimbus, and downloaded from the
- * Supervisors. It is a key value based store. Key being a string and value being the blob data.
+ * Provides a way to store blobs that can be downloaded. Blobs must be able to be uploaded and listed from Nimbus, and
+ * downloaded from the Supervisors. It is a key-value based store, where the key is a string and the value is the blob data.
  *
- * ACL checking must take place against the provided subject. If the blob store does not support Security it must validate that all ACLs set
- * are always WORLD, everything.
+ * <p>ACL checking must take place against the provided subject. If the blob store does not support Security it must
+ * validate that all ACLs set are always WORLD, everything.
  *
- * The users can upload their blobs through the blob store command line. The command line also allows us to update and delete blobs.
+ * <p>The users can upload their blobs through the blob store command line. The command line also allows us to update
+ * and delete blobs.
  *
- * Modifying the replication factor only works for HdfsBlobStore as for the LocalFsBlobStore the replication is dependent on the number of
- * Nimbodes available.
+ * <p>Modifying the replication factor only works for HdfsBlobStore as for the LocalFsBlobStore the replication is
+ * dependent on the number of Nimbodes available.
  */
 public abstract class BlobStore implements Shutdownable, AutoCloseable {
     protected static final String BASE_BLOBS_DIR_NAME = "blobs";
@@ -54,9 +55,9 @@ public abstract class BlobStore implements Shutdownable, AutoCloseable {
     private static final KeyFilter<String> TO_TOPO_ID = (key) -> ConfigUtils.getIdFromBlobKey(key);
 
     /**
-     * Validates key checking for potentially harmful patterns
+     * Validates the key, checking for potentially harmful patterns.
      *
-     * @param key Key for the blob.
+     * @param key Key for the blob
      */
     public static final void validateKey(String key) throws IllegalArgumentException {
         if (!Utils.isValidKey(key)) {
@@ -65,11 +66,11 @@ public abstract class BlobStore implements Shutdownable, AutoCloseable {
     }
 
     /**
-     * Allows us to initialize the blob store
+     * Allows us to initialize the blob store.
      *
      * @param conf       The storm configuration
      * @param baseDir    The directory path to store the blobs
-     * @param nimbusInfo Contains the nimbus host, port and leadership information.
+     * @param nimbusInfo Contains the nimbus host, port and leadership information
      */
     public abstract void prepare(Map<String, Object> conf, String baseDir, NimbusInfo nimbusInfo, ILeaderElector leaderElector);
 
@@ -85,109 +86,163 @@ public abstract class BlobStore implements Shutdownable, AutoCloseable {
     /**
      * Creates the blob.
      *
-     * @param key  Key for the blob.
+     * @param key  Key for the blob
      * @param meta Metadata which contains the acls information
-     * @param who  Is the subject creating the blob.
-     * @return AtomicOutputStream returns a stream into which the data can be written.
-     *
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
+     * @param who  Is the subject creating the blob
+     * @return AtomicOutputStream returns a stream into which the data can be written
      */
     public abstract AtomicOutputStream createBlob(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException,
         KeyAlreadyExistsException;
 
     /**
-     * Updates the blob data.
+     * Wrapper called to create the blob which contains the byte data.
      *
-     * @param key Key for the blob.
-     * @param who Is the subject having the write privilege for the blob.
-     * @return AtomicOutputStream returns a stream into which the data can be written.
+     * @param key  Key for the blob
+     * @param data Byte data that needs to be uploaded
+     * @param meta Metadata which contains the acls information
+     * @param who  Is the subject creating the blob
+     */
+    public void createBlob(String key, byte[] data, SettableBlobMeta meta, Subject who) throws AuthorizationException,
+            KeyAlreadyExistsException, IOException {
+        AtomicOutputStream out = null;
+        try {
+            out = createBlob(key, meta, who);
+            out.write(data);
+            out.close();
+            out = null;
+        } finally {
+            if (out != null) {
+                out.cancel();
+            }
+        }
+    }
+
+    /**
+     * Wrapper called to create the blob from the data read off the given InputStream.
+     *
+     * @param key  Key for the blob
+     * @param in   InputStream from which the data is read to be written as a part of the blob
+     * @param meta Metadata which contains the acls information
+     * @param who  Is the subject creating the blob
+     */
+    public void createBlob(String key, InputStream in, SettableBlobMeta meta, Subject who) throws AuthorizationException,
+            KeyAlreadyExistsException, IOException {
+        AtomicOutputStream out = null;
+        try {
+            out = createBlob(key, meta, who);
+            byte[] buffer = new byte[2048];
+            int len = 0;
+            while ((len = in.read(buffer)) > 0) {
+                out.write(buffer, 0, len);
+            }
+            out.close();
+            out = null;
+        } finally {
+            try {
+                if (out != null) {
+                    out.cancel();
+                }
+                in.close();
+            } catch (IOException throwaway) {
+                // Ignored
+            }
+        }
+    }
+
+    /**
+     * Updates the blob data.
      *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key Key for the blob
+     * @param who Is the subject having the write privilege for the blob
+     * @return AtomicOutputStream returns a stream into which the data can be written
      */
     public abstract AtomicOutputStream updateBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
 
     /**
-     * Gets the current version of metadata for a blob to be viewed by the user or downloaded by the supervisor.
+     * Wrapper called to create the blob which contains the byte data.
      *
-     * @param key Key for the blob.
-     * @param who Is the subject having the read privilege for the blob.
-     * @return AtomicOutputStream returns a stream into which the data can be written.
+     * @param key  Key for the blob
+     * @param data Byte data that needs to be uploaded
+     * @param who  Is the subject creating the blob
+     */
+    public void updateBlob(String key, byte[] data, Subject who) throws AuthorizationException, IOException, KeyNotFoundException {
+        AtomicOutputStream out = null;
+        try {
+            out = updateBlob(key, who);
+            out.write(data);
+            out.close();
+            out = null;
+        } finally {
+            if (out != null) {
+                out.cancel();
+            }
+        }
+    }
+
+    /**
+     * Gets the current version of metadata for a blob to be viewed by the user or downloaded by the supervisor.
      *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key Key for the blob
+     * @param who Is the subject having the read privilege for the blob
+     * @return ReadableBlobMeta containing the metadata for the blob
      */
     public abstract ReadableBlobMeta getBlobMeta(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
 
     /**
-     * Sets leader elector (only used by LocalFsBlobStore to help sync blobs between Nimbi
-     * @param leaderElector
+     * Sets leader elector (only used by LocalFsBlobStore to help sync blobs between Nimbi).
      */
 
     public abstract void setLeaderElector(ILeaderElector leaderElector);
+
     /**
      * Sets the metadata with renewed acls for the blob.
      *
-     * @param key  Key for the blob.
-     * @param meta Metadata which contains the updated acls information.
-     * @param who  Is the subject having the write privilege for the blob.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key  Key for the blob
+     * @param meta Metadata which contains the updated acls information
+     * @param who  Is the subject having the write privilege for the blob
      */
     public abstract void setBlobMeta(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyNotFoundException;
 
     /**
      * Deletes the blob data and metadata.
      *
-     * @param key Key for the blob.
-     * @param who Is the subject having write privilege for the blob.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key Key for the blob
+     * @param who Is the subject having write privilege for the blob
      */
     public abstract void deleteBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
 
     /**
-     * Gets the InputStream to read the blob details
+     * Gets the InputStream to read the blob details.
      *
-     * @param key Key for the blob.
-     * @param who Is the subject having the read privilege for the blob.
-     * @return InputStreamWithMeta has the additional file length and version information.
-     *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key Key for the blob
+     * @param who Is the subject having the read privilege for the blob
+     * @return InputStreamWithMeta has the additional file length and version information
      */
     public abstract InputStreamWithMeta getBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
 
     /**
      * Returns an iterator with all the list of keys currently available on the blob store.
      *
-     * @return Iterator<String>
+     * @return {@code Iterator<String>}
      */
     public abstract Iterator<String> listKeys();
 
     /**
      * Gets the replication factor of the blob.
      *
-     * @param key Key for the blob.
-     * @param who Is the subject having the read privilege for the blob.
-     * @return BlobReplication object containing the replication factor for the blob.
-     *
-     * @throws Exception
+     * @param key Key for the blob
+     * @param who Is the subject having the read privilege for the blob
+     * @return BlobReplication object containing the replication factor for the blob
      */
     public abstract int getBlobReplication(String key, Subject who) throws Exception;
 
     /**
      * Modifies the replication factor of the blob.
      *
-     * @param key         Key for the blob.
-     * @param replication The replication factor the blob has to be set.
+     * @param key         Key for the blob
+     * @param replication The replication factor the blob has to be set
      * @param who         Is the subject having the update privilege for the blob
-     * @return BlobReplication object containing the updated replication factor for the blob.
-     *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     * @throws IOException
+     * @return BlobReplication object containing the updated replication factor for the blob
      */
     public abstract int updateBlobReplication(String key, int replication, Subject who) throws AuthorizationException, KeyNotFoundException,
         IOException;
@@ -218,99 +273,11 @@ public abstract class BlobStore implements Shutdownable, AutoCloseable {
     }
 
     /**
-     * Wrapper called to create the blob which contains the byte data
-     *
-     * @param key  Key for the blob.
-     * @param data Byte data that needs to be uploaded.
-     * @param meta Metadata which contains the acls information
-     * @param who  Is the subject creating the blob.
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
-     * @throws IOException
-     */
-    public void createBlob(String key, byte[] data, SettableBlobMeta meta, Subject who) throws AuthorizationException,
-        KeyAlreadyExistsException, IOException {
-        AtomicOutputStream out = null;
-        try {
-            out = createBlob(key, meta, who);
-            out.write(data);
-            out.close();
-            out = null;
-        } finally {
-            if (out != null) {
-                out.cancel();
-            }
-        }
-    }
-
-    /**
-     * Wrapper called to create the blob which contains the byte data
-     *
-     * @param key  Key for the blob.
-     * @param data Byte data that needs to be uploaded.
-     * @param who  Is the subject creating the blob.
-     * @throws AuthorizationException
-     * @throws IOException
-     * @throws KeyNotFoundException
-     */
-    public void updateBlob(String key, byte[] data, Subject who) throws AuthorizationException, IOException, KeyNotFoundException {
-        AtomicOutputStream out = null;
-        try {
-            out = updateBlob(key, who);
-            out.write(data);
-            out.close();
-            out = null;
-        } finally {
-            if (out != null) {
-                out.cancel();
-            }
-        }
-    }
-
-    /**
-     * Wrapper called to create the blob which contains the byte data
-     *
-     * @param key  Key for the blob.
-     * @param in   InputStream from which the data is read to be written as a part of the blob.
-     * @param meta Metadata which contains the acls information
-     * @param who  Is the subject creating the blob.
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
-     * @throws IOException
-     */
-    public void createBlob(String key, InputStream in, SettableBlobMeta meta, Subject who) throws AuthorizationException,
-        KeyAlreadyExistsException, IOException {
-        AtomicOutputStream out = null;
-        try {
-            out = createBlob(key, meta, who);
-            byte[] buffer = new byte[2048];
-            int len = 0;
-            while ((len = in.read(buffer)) > 0) {
-                out.write(buffer, 0, len);
-            }
-            out.close();
-            out = null;
-        } finally {
-            try {
-                if (out != null) {
-                    out.cancel();
-                }
-                in.close();
-            } catch (IOException throwaway) {
-                // Ignored
-            }
-        }
-    }
-
-    /**
      * Reads the blob from the blob store and writes it into the output stream.
      *
-     * @param key Key for the blob.
+     * @param key Key for the blob
      * @param out Output stream
-     * @param who Is the subject having read privilege for the blob.
-     * @throws IOException
-     * @throws KeyNotFoundException
-     * @throws AuthorizationException
+     * @param who Is the subject having read privilege for the blob
      */
     public void readBlobTo(String key, OutputStream out, Subject who) throws IOException, KeyNotFoundException, AuthorizationException {
         InputStreamWithMeta in = getBlob(key, who);
@@ -332,13 +299,8 @@ public abstract class BlobStore implements Shutdownable, AutoCloseable {
     /**
      * Wrapper around readBlobTo which buffers the blob contents into a byte array.
      *
-     * @param key Key for the blob.
-     * @param who Is the subject having the read privilege for the blob.
-     * @return ByteArrayOutputStream
-     *
-     * @throws IOException
-     * @throws KeyNotFoundException
-     * @throws AuthorizationException
+     * @param key Key for the blob
+     * @param who Is the subject having the read privilege for the blob
      */
     public byte[] readBlob(String key, Subject who) throws IOException, KeyNotFoundException, AuthorizationException {
         ByteArrayOutputStream out = new ByteArrayOutputStream();
@@ -349,6 +311,7 @@ public abstract class BlobStore implements Shutdownable, AutoCloseable {
     }
 
     /**
+     * Get IDs stored in blob store.
      * @return a set of all of the topology ids with special data stored in the blob store.
      */
     public Set<String> storedTopoIds() {
@@ -356,7 +319,7 @@ public abstract class BlobStore implements Shutdownable, AutoCloseable {
     }
 
     /**
-     * Blob store implements its own version of iterator to list the blobs
+     * The blob store implements its own iterator to list the blobs.
      */
     public static class KeyTranslationIterator implements Iterator<String> {
         private Iterator<String> it = null;
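
The wrapper bodies removed above all follow the same commit-or-cancel contract for AtomicOutputStream: close() commits the blob, and anything short of a successful close() must be discarded with cancel(). A minimal sketch of that pattern, assuming storm-client on the classpath; the helper name writeBlobData is hypothetical:

    import javax.security.auth.Subject;
    import org.apache.storm.blobstore.AtomicOutputStream;
    import org.apache.storm.blobstore.BlobStore;
    import org.apache.storm.generated.SettableBlobMeta;

    public final class BlobWriteSketch {
        // Hypothetical helper: commit data to a new blob, or leave nothing behind on failure.
        static void writeBlobData(BlobStore store, String key, byte[] data,
                                  SettableBlobMeta meta, Subject who) throws Exception {
            AtomicOutputStream out = store.createBlob(key, meta, who);
            try {
                out.write(data);
                out.close();      // close() commits the blob
                out = null;       // success: stop the finally block from cancelling
            } finally {
                if (out != null) {
                    out.cancel(); // discard the partial write
                }
            }
        }
    }
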
diff --git a/storm-client/src/jvm/org/apache/storm/blobstore/BlobStoreAclHandler.java b/storm-client/src/jvm/org/apache/storm/blobstore/BlobStoreAclHandler.java
index 3b59022..2270142 100644
--- a/storm-client/src/jvm/org/apache/storm/blobstore/BlobStoreAclHandler.java
+++ b/storm-client/src/jvm/org/apache/storm/blobstore/BlobStoreAclHandler.java
@@ -46,7 +46,7 @@ public class BlobStoreAclHandler {
     public static final List<AccessControl> WORLD_EVERYTHING =
         Arrays.asList(new AccessControl(AccessControlType.OTHER, READ | WRITE | ADMIN));
     public static final List<AccessControl> DEFAULT = new ArrayList<AccessControl>();
-    private final IPrincipalToLocal _ptol;
+    private final IPrincipalToLocal ptol;
     private final IGroupMappingServiceProvider groupMappingServiceProvider;
     private Set<String> supervisors;
     private Set<String> admins;
@@ -54,7 +54,7 @@ public class BlobStoreAclHandler {
     private boolean doAclValidation;
 
     public BlobStoreAclHandler(Map<String, Object> conf) {
-        _ptol = ClientAuthUtils.getPrincipalToLocalPlugin(conf);
+        ptol = ClientAuthUtils.getPrincipalToLocalPlugin(conf);
         if (conf.get(Config.STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN) != null) {
             groupMappingServiceProvider = ClientAuthUtils.getGroupMappingServiceProviderPlugin(conf);
         } else {
@@ -77,7 +77,7 @@ public class BlobStoreAclHandler {
         }
     }
 
-    private static AccessControlType parseACLType(String type) {
+    private static AccessControlType parseAclType(String type) {
         if ("other".equalsIgnoreCase(type) || "o".equalsIgnoreCase(type)) {
             return AccessControlType.OTHER;
         } else if ("user".equalsIgnoreCase(type) || "u".equalsIgnoreCase(type)) {
@@ -125,7 +125,7 @@ public class BlobStoreAclHandler {
             access = parts[2];
         }
         AccessControl ret = new AccessControl();
-        ret.set_type(parseACLType(type));
+        ret.set_type(parseAclType(type));
         ret.set_name(name);
         ret.set_access(parseAccess(access));
         return ret;
@@ -160,6 +160,7 @@ public class BlobStoreAclHandler {
         return ret.toString();
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public static void validateSettableACLs(String key, List<AccessControl> acls) throws AuthorizationException {
         Set<String> aclUsers = new HashSet<>();
         List<String> duplicateUsers = new ArrayList<>();
@@ -181,7 +182,7 @@ public class BlobStoreAclHandler {
         Set<String> user = new HashSet<String>();
         if (who != null) {
             for (Principal p : who.getPrincipals()) {
-                user.add(_ptol.toLocal(p));
+                user.add(ptol.toLocal(p));
             }
         }
         return user;
@@ -264,7 +265,6 @@ public class BlobStoreAclHandler {
      *             5 implies READ and ADMIN privileges.
      * @param who  Is the user against whom the permissions are validated for a key using the ACL and the mask.
      * @param key  Key used to identify the blob.
-     * @throws AuthorizationException
      */
     public void hasAnyPermissions(List<AccessControl> acl, int mask, Subject who, String key) throws AuthorizationException {
         if (!doAclValidation) {
@@ -294,7 +294,6 @@ public class BlobStoreAclHandler {
      *             5 implies READ and ADMIN privileges.
      * @param who  Is the user against whom the permissions are validated for a key using the ACL and the mask.
      * @param key  Key used to identify the blob.
-     * @throws AuthorizationException
      */
     public void hasPermissions(List<AccessControl> acl, int mask, Subject who, String key) throws AuthorizationException {
         if (!doAclValidation) {
@@ -318,7 +317,7 @@ public class BlobStoreAclHandler {
     }
 
     public void normalizeSettableBlobMeta(String key, SettableBlobMeta meta, Subject who, int opMask) {
-        meta.set_acl(normalizeSettableACLs(key, meta.get_acl(), who, opMask));
+        meta.set_acl(normalizeSettableAcls(key, meta.get_acl(), who, opMask));
     }
 
     private String namedPerms(int mask) {
@@ -351,7 +350,7 @@ public class BlobStoreAclHandler {
         }
     }
 
-    private List<AccessControl> removeBadACLs(List<AccessControl> accessControls) {
+    private List<AccessControl> removeBadAcls(List<AccessControl> accessControls) {
         List<AccessControl> resultAcl = new ArrayList<AccessControl>();
         for (AccessControl control : accessControls) {
             if (control.get_type().equals(AccessControlType.OTHER) && (control.get_access() == 0)) {
@@ -364,12 +363,12 @@ public class BlobStoreAclHandler {
         return resultAcl;
     }
 
-    private final List<AccessControl> normalizeSettableACLs(String key, List<AccessControl> acls, Subject who,
+    private final List<AccessControl> normalizeSettableAcls(String key, List<AccessControl> acls, Subject who,
                                                             int opMask) {
-        List<AccessControl> cleanAcls = removeBadACLs(acls);
+        List<AccessControl> cleanAcls = removeBadAcls(acls);
         Set<String> userNames = getUserNamesFromSubject(who);
         for (String user : userNames) {
-            fixACLsForUser(cleanAcls, user, opMask);
+            fixAclsForUser(cleanAcls, user, opMask);
         }
         fixEmptyNameACLForUsers(cleanAcls, userNames, opMask);
         if ((who == null || userNames.isEmpty()) && !worldEverything(acls)) {
@@ -393,39 +392,40 @@ public class BlobStoreAclHandler {
         return isWorldEverything;
     }
 
-    private void fixACLsForUser(List<AccessControl> acls, String user, int mask) {
-        boolean foundUserACL = false;
-        List<AccessControl> emptyUserACLs = new ArrayList<>();
+    private void fixAclsForUser(List<AccessControl> acls, String user, int mask) {
+        boolean foundUserAcl = false;
+        List<AccessControl> emptyUserAcls = new ArrayList<>();
 
         for (AccessControl control : acls) {
             if (control.get_type() == AccessControlType.USER) {
                 if (!control.is_set_name()) {
-                    emptyUserACLs.add(control);
+                    emptyUserAcls.add(control);
                 } else if (control.get_name().equals(user)) {
                     int currentAccess = control.get_access();
                     if ((currentAccess & mask) != mask) {
                         control.set_access(currentAccess | mask);
                     }
-                    foundUserACL = true;
+                    foundUserAcl = true;
                 }
             }
         }
 
         // if ACLs have two user ACLs for empty user and principal, discard empty user ACL
-        if (!emptyUserACLs.isEmpty() && foundUserACL) {
-            acls.removeAll(emptyUserACLs);
+        if (!emptyUserAcls.isEmpty() && foundUserAcl) {
+            acls.removeAll(emptyUserAcls);
         }
 
         // add default user ACL when only empty user ACL is not present
-        if (emptyUserACLs.isEmpty() && !foundUserACL) {
-            AccessControl userACL = new AccessControl();
-            userACL.set_type(AccessControlType.USER);
-            userACL.set_name(user);
-            userACL.set_access(mask);
-            acls.add(userACL);
+        if (emptyUserAcls.isEmpty() && !foundUserAcl) {
+            AccessControl userAcl = new AccessControl();
+            userAcl.set_type(AccessControlType.USER);
+            userAcl.set_name(user);
+            userAcl.set_access(mask);
+            acls.add(userAcl);
         }
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private void fixEmptyNameACLForUsers(List<AccessControl> acls, Set<String> users, int mask) {
         List<AccessControl> aclsToAdd = new ArrayList<>();
         List<AccessControl> aclsToRemove = new ArrayList<>();
@@ -455,7 +455,7 @@ public class BlobStoreAclHandler {
         Set<String> user = new HashSet<String>();
         if (who != null) {
             for (Principal p : who.getPrincipals()) {
-                user.add(_ptol.toLocal(p));
+                user.add(ptol.toLocal(p));
             }
         }
         return user;
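
The parseAclType and parseAccessControl hunks above handle ACL strings of the form [type]:[name]:[access], where type is u/user or o/other and access is some subset of r, w and a. A hedged sketch of round-tripping two such strings, assuming the public parseAccessControl and accessControlToString helpers that these hunks touch:

    import org.apache.storm.blobstore.BlobStoreAclHandler;
    import org.apache.storm.generated.AccessControl;

    public final class AclParseSketch {
        public static void main(String[] args) {
            // "u:alice:rwa" => USER acl for alice with READ, WRITE and ADMIN
            AccessControl alice = BlobStoreAclHandler.parseAccessControl("u:alice:rwa");
            // "o::r" => OTHER acl granting READ to everyone
            AccessControl world = BlobStoreAclHandler.parseAccessControl("o::r");
            System.out.println(BlobStoreAclHandler.accessControlToString(alice));
            System.out.println(BlobStoreAclHandler.accessControlToString(world));
        }
    }
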
diff --git a/storm-client/src/jvm/org/apache/storm/blobstore/ClientBlobStore.java b/storm-client/src/jvm/org/apache/storm/blobstore/ClientBlobStore.java
index ee4387f..a83146d 100644
--- a/storm-client/src/jvm/org/apache/storm/blobstore/ClientBlobStore.java
+++ b/storm-client/src/jvm/org/apache/storm/blobstore/ClientBlobStore.java
@@ -25,16 +25,17 @@ import org.apache.storm.utils.NimbusClient;
 import org.apache.storm.utils.Utils;
 
 /**
- * The ClientBlobStore has two concrete implementations 1. NimbusBlobStore 2. HdfsClientBlobStore
+ * The ClientBlobStore has two concrete implementations: 1. NimbusBlobStore 2. HdfsClientBlobStore.
  *
- * Create, update, read and delete are some of the basic operations defined by this interface. Each operation is validated for permissions
- * against an user. We currently have NIMBUS_ADMINS and SUPERVISOR_ADMINS configuration. NIMBUS_ADMINS are given READ, WRITE and ADMIN
- * access whereas the SUPERVISOR_ADMINS are given READ access in order to read and download the blobs form the nimbus.
+ * <p>Create, update, read and delete are some of the basic operations defined by this interface. Each operation is
+ * validated for permissions against a user. We currently have the NIMBUS_ADMINS and SUPERVISOR_ADMINS configurations.
+ * NIMBUS_ADMINS are given READ, WRITE and ADMIN access whereas the SUPERVISOR_ADMINS are given READ access in order to
+ * read and download the blobs from the nimbus.
  *
- * The ACLs for the blob store are validated against whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER who has read, write or
- * admin privileges in order to perform respective operations on the blob.
+ * <p>The ACLs for the blob store are validated against whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER
+ * who has read, write or admin privileges in order to perform respective operations on the blob.
  *
- * For more detailed implementation
+ * <p>For a more detailed implementation
  *
  * @see org.apache.storm.blobstore.NimbusBlobStore
  */
@@ -50,19 +51,16 @@ public abstract class ClientBlobStore implements Shutdownable, AutoCloseable {
     /**
      * Sets up the client API by parsing the configs.
      *
-     * @param conf The storm conf containing the config details.
+     * @param conf The storm conf containing the config details
      */
     public abstract void prepare(Map<String, Object> conf);
 
     /**
      * Client facing API to create a blob.
      *
-     * @param key  blob key name.
-     * @param meta contains ACL information.
-     * @return AtomicOutputStream returns an output stream into which data can be written.
-     *
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
+     * @param key  blob key name
+     * @param meta contains ACL information
+     * @return AtomicOutputStream returns an output stream into which data can be written
      */
     protected abstract AtomicOutputStream createBlobToExtend(String key, SettableBlobMeta meta) throws AuthorizationException,
         KeyAlreadyExistsException;
@@ -70,22 +68,16 @@ public abstract class ClientBlobStore implements Shutdownable, AutoCloseable {
     /**
      * Client facing API to update a blob.
      *
-     * @param key blob key name.
-     * @return AtomicOutputStream returns an output stream into which data can be written.
-     *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key blob key name
+     * @return AtomicOutputStream returns an output stream into which data can be written
      */
     public abstract AtomicOutputStream updateBlob(String key) throws AuthorizationException, KeyNotFoundException;
 
     /**
      * Client facing API to read the metadata information.
      *
-     * @param key blob key name.
-     * @return AtomicOutputStream returns an output stream into which data can be written.
-     *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key blob key name
+     * @return ReadableBlobMeta containing the metadata for the blob
      */
     public abstract ReadableBlobMeta getBlobMeta(String key) throws AuthorizationException, KeyNotFoundException;
 
@@ -98,34 +90,28 @@ public abstract class ClientBlobStore implements Shutdownable, AutoCloseable {
     /**
      * Client facing API to set the metadata for a blob.
      *
-     * @param key  blob key name.
-     * @param meta contains ACL information.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key  blob key name
+     * @param meta contains ACL information
      */
     protected abstract void setBlobMetaToExtend(String key, SettableBlobMeta meta) throws AuthorizationException, KeyNotFoundException;
 
     /**
      * Client facing API to delete a blob.
      *
-     * @param key blob key name.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key blob key name
      */
     public abstract void deleteBlob(String key) throws AuthorizationException, KeyNotFoundException;
 
     /**
      * Client facing API to read a blob.
      *
-     * @param key blob key name.
-     * @return an InputStream to read the metadata for a blob.
-     *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key blob key name
+     * @return an InputStreamWithMeta to read the blob contents along with its metadata
      */
     public abstract InputStreamWithMeta getBlob(String key) throws AuthorizationException, KeyNotFoundException;
 
     /**
+     * List keys.
      * @return Iterator for a list of keys currently present in the blob store.
      */
     public abstract Iterator<String> listKeys();
@@ -133,23 +119,17 @@ public abstract class ClientBlobStore implements Shutdownable, AutoCloseable {
     /**
      * Client facing API to read the replication of a blob.
      *
-     * @param key blob key name.
-     * @return int indicates the replication factor of a blob.
-     *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key blob key name
+     * @return int indicates the replication factor of a blob
      */
     public abstract int getBlobReplication(String key) throws AuthorizationException, KeyNotFoundException;
 
     /**
      * Client facing API to update the replication of a blob.
      *
-     * @param key         blob key name.
-     * @param replication int indicates the replication factor a blob has to be set.
-     * @return int indicates the replication factor of a blob.
-     *
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key         blob key name
+     * @param replication the replication factor to set for the blob
+     * @return int indicates the replication factor of a blob
      */
     public abstract int updateBlobReplication(String key, int replication) throws AuthorizationException, KeyNotFoundException;
 
@@ -163,10 +143,8 @@ public abstract class ClientBlobStore implements Shutdownable, AutoCloseable {
     public abstract boolean setClient(Map<String, Object> conf, NimbusClient client);
 
     /**
-     * Creates state inside a zookeeper. Required for blobstore to write to zookeeper when Nimbus HA is turned on in order to maintain state
-     * consistency
-     *
-     * @param key
+     * Creates state inside ZooKeeper. Required for the blobstore to write to ZooKeeper when Nimbus HA is enabled, in
+     * order to maintain state consistency.
      */
     public abstract void createStateInZookeeper(String key);
 
@@ -176,12 +154,9 @@ public abstract class ClientBlobStore implements Shutdownable, AutoCloseable {
     /**
      * Client facing API to create a blob.
      *
-     * @param key  blob key name.
-     * @param meta contains ACL information.
-     * @return AtomicOutputStream returns an output stream into which data can be written.
-     *
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
+     * @param key  blob key name
+     * @param meta contains ACL information
+     * @return AtomicOutputStream returns an output stream into which data can be written
      */
     public final AtomicOutputStream createBlob(String key, SettableBlobMeta meta) throws AuthorizationException, KeyAlreadyExistsException {
         if (meta != null && meta.is_set_acl()) {
@@ -193,10 +168,8 @@ public abstract class ClientBlobStore implements Shutdownable, AutoCloseable {
     /**
      * Client facing API to set the metadata for a blob.
      *
-     * @param key  blob key name.
-     * @param meta contains ACL information.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
+     * @param key  blob key name
+     * @param meta contains ACL information
      */
     public final void setBlobMeta(String key, SettableBlobMeta meta) throws AuthorizationException, KeyNotFoundException {
         if (meta != null && meta.is_set_acl()) {
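
A minimal usage sketch of the client API documented above, assuming storm-client on the classpath and a reachable Nimbus, and reusing the WORLD_EVERYTHING ACL constant shown in the BlobStoreAclHandler diff; the blob key and payload are placeholders:

    import java.nio.charset.StandardCharsets;
    import java.util.Map;
    import org.apache.storm.blobstore.AtomicOutputStream;
    import org.apache.storm.blobstore.BlobStoreAclHandler;
    import org.apache.storm.blobstore.ClientBlobStore;
    import org.apache.storm.blobstore.NimbusBlobStore;
    import org.apache.storm.generated.SettableBlobMeta;
    import org.apache.storm.utils.Utils;

    public final class ClientBlobStoreSketch {
        public static void main(String[] args) throws Exception {
            Map<String, Object> conf = Utils.readStormConfig();
            try (ClientBlobStore store = new NimbusBlobStore()) {
                store.prepare(conf);
                // WORLD_EVERYTHING grants READ, WRITE and ADMIN to everyone (see above)
                SettableBlobMeta meta = new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING);
                AtomicOutputStream out = store.createBlob("example-key", meta);
                out.write("hello".getBytes(StandardCharsets.UTF_8));
                out.close(); // commits the blob
            }
        }
    }
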
diff --git a/storm-client/src/jvm/org/apache/storm/blobstore/NimbusBlobStore.java b/storm-client/src/jvm/org/apache/storm/blobstore/NimbusBlobStore.java
index 8b8a549..9a8f034 100644
--- a/storm-client/src/jvm/org/apache/storm/blobstore/NimbusBlobStore.java
+++ b/storm-client/src/jvm/org/apache/storm/blobstore/NimbusBlobStore.java
@@ -35,7 +35,7 @@ import org.slf4j.LoggerFactory;
  * NimbusBlobStore is a USER facing client API to perform basic operations such as create, update, delete and read for local and hdfs blob
  * store.
  *
- * For local blob store it is also the client facing API for supervisor in order to download blobs from nimbus.
+ * <p>For the local blob store it is also the client-facing API used by the supervisor to download blobs from nimbus.
  */
 public class NimbusBlobStore extends ClientBlobStore implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(NimbusBlobStore.class);
@@ -199,6 +199,7 @@ public class NimbusBlobStore extends ClientBlobStore implements AutoCloseable {
     }
 
     @Override
+    @SuppressWarnings("checkstyle:NoFinalizer")
     protected void finalize() {
         shutdown();
     }
@@ -325,6 +326,11 @@ public class NimbusBlobStore extends ClientBlobStore implements AutoCloseable {
             }
         }
 
+        @Override
+        public synchronized int read(byte[] b) throws IOException {
+            return read(b, 0, b.length);
+        }
+
         private boolean isEmpty() {
             return buffer == null || offset >= end;
         }
@@ -346,11 +352,6 @@ public class NimbusBlobStore extends ClientBlobStore implements AutoCloseable {
         }
 
         @Override
-        public synchronized int read(byte[] b) throws IOException {
-            return read(b, 0, b.length);
-        }
-
-        @Override
         public synchronized int available() {
             return buffer == null ? 0 : (end - offset);
         }
diff --git a/storm-client/src/jvm/org/apache/storm/bolt/JoinBolt.java b/storm-client/src/jvm/org/apache/storm/bolt/JoinBolt.java
index e556a63..6246966 100644
--- a/storm-client/src/jvm/org/apache/storm/bolt/JoinBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/bolt/JoinBolt.java
@@ -12,7 +12,6 @@
 
 package org.apache.storm.bolt;
 
-
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -29,7 +28,6 @@ import org.apache.storm.tuple.Tuple;
 import org.apache.storm.windowing.TimestampExtractor;
 import org.apache.storm.windowing.TupleWindow;
 
-
 public class JoinBolt extends BaseWindowedBolt {
 
     protected final Selector selectorType;
@@ -75,11 +73,15 @@ public class JoinBolt extends BaseWindowedBolt {
     }
 
     /**
-     * Performs inner Join with the newStream. SQL    :   from priorStream inner join newStream on newStream.field = priorStream.field1 same
-     * as:   new WindowedQueryBolt(priorStream,field1). join(newStream, field, priorStream);
-     *
-     * Note: priorStream must be previously joined. Valid ex:    new WindowedQueryBolt(s1,k1). join(s2,k2, s1). join(s3,k3, s2); Invalid ex:
-     * new WindowedQueryBolt(s1,k1). join(s3,k3, s2). join(s2,k2, s1);
+     * Performs inner Join with the newStream.
+     * SQL:
+     * <code>from priorStream inner join newStream on newStream.field = priorStream.field1</code>
+     * same as:
+     * <code>new WindowedQueryBolt(priorStream,field1). join(newStream, field, priorStream);</code>
+     * Note: priorStream must be previously joined. Valid ex:
+     * <code>new WindowedQueryBolt(s1,k1). join(s2,k2, s1). join(s3,k3, s2);</code>
+     * Invalid ex:
+     * <code>new WindowedQueryBolt(s1,k1). join(s3,k3, s2). join(s2,k2, s1);</code>
      *
      * @param newStream Either stream name or name of upstream component
      * @param field     the field on which to perform the join
@@ -92,7 +94,7 @@ public class JoinBolt extends BaseWindowedBolt {
      * Performs left Join with the newStream. SQL: from stream1 left join stream2 on stream2.field = stream1.field1 same as:
      * new WindowedQueryBolt(stream1, field1). leftJoin(stream2, field, stream1);
      *
-     * Note: priorStream must be previously joined Valid ex:    new WindowedQueryBolt(s1,k1). leftJoin(s2,k2, s1). leftJoin(s3,k3, s2);
+     * <p>Note: priorStream must be previously joined. Valid ex:    new WindowedQueryBolt(s1,k1). leftJoin(s2,k2, s1). leftJoin(s3,k3, s2);
      * Invalid ex:  new WindowedQueryBolt(s1,k1). leftJoin(s3,k3, s2). leftJoin(s2,k2, s1);
      *
      * @param newStream Either a name of a stream or an upstream component
@@ -122,9 +124,6 @@ public class JoinBolt extends BaseWindowedBolt {
      * Key names are supported for nested types: e.g: .select("outerKey1.innerKey1, outerKey1.innerKey2, stream3:outerKey2.innerKey3"). Inner
      * types (non leaf) must be Map<> in order to support nested lookup using this dot notation. The selected fields implicitly declare the
      * output fieldNames for the bolt.
-     *
-     * @param commaSeparatedKeys
-     * @return
      */
     public JoinBolt select(String commaSeparatedKeys) {
         String[] fieldNames = commaSeparatedKeys.split(",");
@@ -339,8 +338,8 @@ public class JoinBolt extends BaseWindowedBolt {
     protected Object lookupField(FieldSelector fieldSelector, Tuple tuple) {
 
         // verify stream name matches, if stream name was specified
-        if (fieldSelector.streamName != null &&
-            !fieldSelector.streamName.equalsIgnoreCase(getStreamSelector(tuple))) {
+        if (fieldSelector.streamName != null
+                && !fieldSelector.streamName.equalsIgnoreCase(getStreamSelector(tuple))) {
             return null;
         }
 
@@ -435,13 +434,18 @@ public class JoinBolt extends BaseWindowedBolt {
         STREAM, SOURCE
     }
 
-    protected enum JoinType {INNER, LEFT, RIGHT, OUTER}
+    protected enum JoinType {
+        INNER,
+        LEFT,
+        RIGHT,
+        OUTER
+    }
 
     /**
-     * Describes how to join the other stream with the current stream
+     * Describes how to join the other stream with the current stream.
      */
     protected static class JoinInfo implements Serializable {
-        final static long serialVersionUID = 1L;
+        static final long serialVersionUID = 1L;
 
         private JoinType joinType;        // nature of join
         private FieldSelector field;           // field for the current stream
@@ -505,6 +509,8 @@ public class JoinBolt extends BaseWindowedBolt {
         }
 
         /**
+         * Constructor.
+         *
          * @param stream          name of stream
          * @param fieldDescriptor Simple fieldDescriptor like "x.y.z" and w/o a 'stream1:' stream qualifier.
          */
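
A short sketch of the join/select API described in the javadoc above, mirroring the inner-join example; the stream names and fields (purchases, ads, userId) are placeholders, and the window size is illustrative:

    import java.util.concurrent.TimeUnit;
    import org.apache.storm.bolt.JoinBolt;
    import org.apache.storm.topology.base.BaseWindowedBolt;

    public final class JoinBoltSketch {
        public static JoinBolt purchasesWithAds() {
            // from purchases inner join ads on ads.userId = purchases.userId
            JoinBolt jbolt = new JoinBolt("purchases", "userId")
                    .join("ads", "userId", "purchases")
                    .select("userId,product,ad");
            jbolt.withTumblingWindow(new BaseWindowedBolt.Duration(10, TimeUnit.SECONDS));
            return jbolt;
        }
    }
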
diff --git a/storm-client/src/jvm/org/apache/storm/callback/ZKStateChangedCallback.java b/storm-client/src/jvm/org/apache/storm/callback/ZKStateChangedCallback.java
index 08672fa..30f9b5d 100644
--- a/storm-client/src/jvm/org/apache/storm/callback/ZKStateChangedCallback.java
+++ b/storm-client/src/jvm/org/apache/storm/callback/ZKStateChangedCallback.java
@@ -14,6 +14,7 @@ package org.apache.storm.callback;
 
 import org.apache.storm.shade.org.apache.zookeeper.Watcher;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public interface ZKStateChangedCallback {
     public void changed(Watcher.Event.EventType type, String path);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/ClusterUtils.java b/storm-client/src/jvm/org/apache/storm/cluster/ClusterUtils.java
index 145ec99..c0bacc4 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/ClusterUtils.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/ClusterUtils.java
@@ -224,7 +224,7 @@ public class ClusterUtils {
     }
 
     /**
-     * Get the path to secret keys for a specific topology
+     * Get the path to secret keys for a specific topology.
      *
      * @param type       the service the secret is for.
      * @param topologyId the topology the secret is for.
@@ -254,11 +254,7 @@ public class ClusterUtils {
     }
 
     /**
-     * Ensures that we only return heartbeats for executors assigned to this worker
-     *
-     * @param executors
-     * @param workerHeartbeat
-     * @return
+     * Ensures that we only return heartbeats for executors assigned to this worker.
      */
     public static Map<ExecutorInfo, ExecutorBeat> convertExecutorBeats(List<ExecutorInfo> executors,
                                                                        ClusterWorkerHeartbeat workerHeartbeat) {
@@ -276,18 +272,18 @@ public class ClusterUtils {
         return executorWhb;
     }
 
-    public static IStateStorage mkStateStorage(Map<String, Object> config, Map<String, Object> auth_conf,
+    public static IStateStorage mkStateStorage(Map<String, Object> config, Map<String, Object> authConf,
                                                ClusterStateContext context) throws Exception {
-        return _instance.mkStateStorageImpl(config, auth_conf, context);
+        return _instance.mkStateStorageImpl(config, authConf, context);
     }
 
-    public static IStormClusterState mkStormClusterState(Object StateStorage, ILocalAssignmentsBackend backend,
+    public static IStormClusterState mkStormClusterState(Object stateStorage, ILocalAssignmentsBackend backend,
                                                          ClusterStateContext context) throws Exception {
-        return _instance.mkStormClusterStateImpl(StateStorage, backend, context);
+        return _instance.mkStormClusterStateImpl(stateStorage, backend, context);
     }
 
-    public static IStormClusterState mkStormClusterState(Object StateStorage, ClusterStateContext context) throws Exception {
-        return _instance.mkStormClusterStateImpl(StateStorage, LocalAssignmentsBackendFactory.getDefault(), context);
+    public static IStormClusterState mkStormClusterState(Object stateStorage, ClusterStateContext context) throws Exception {
+        return _instance.mkStormClusterStateImpl(stateStorage, LocalAssignmentsBackendFactory.getDefault(), context);
     }
 
     public static String stringifyError(Throwable error) {
@@ -302,13 +298,13 @@ public class ClusterUtils {
         if (stateStorage instanceof IStateStorage) {
             return new StormClusterStateImpl((IStateStorage) stateStorage, backend, context, false);
         } else {
-            IStateStorage Storage = _instance.mkStateStorageImpl((Map<String, Object>) stateStorage,
+            IStateStorage storage = _instance.mkStateStorageImpl((Map<String, Object>) stateStorage,
                                                                  (Map<String, Object>) stateStorage, context);
-            return new StormClusterStateImpl(Storage, backend, context, true);
+            return new StormClusterStateImpl(storage, backend, context, true);
         }
     }
 
-    public IStateStorage mkStateStorageImpl(Map<String, Object> config, Map<String, Object> auth_conf, ClusterStateContext context) throws
+    public IStateStorage mkStateStorageImpl(Map<String, Object> config, Map<String, Object> authConf, ClusterStateContext context) throws
         Exception {
         String className = null;
         IStateStorage stateStorage = null;
@@ -319,7 +315,7 @@ public class ClusterUtils {
         }
         Class clazz = Class.forName(className);
         StateStorageFactory storageFactory = (StateStorageFactory) clazz.newInstance();
-        stateStorage = storageFactory.mkStore(config, auth_conf, context);
+        stateStorage = storageFactory.mkStore(config, authConf, context);
         return stateStorage;
     }
 }
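
mkStateStorageImpl above resolves the storage backend reflectively from a class name held in the config. A stripped-down sketch of that plugin-loading pattern; the helper name loadPlugin and its generic signature are hypothetical:

    import java.util.Map;

    public final class PluginLoaderSketch {
        // Hypothetical minimal version of the reflective factory lookup above.
        static <T> T loadPlugin(Map<String, Object> conf, String configKey, Class<T> iface) throws Exception {
            String className = (String) conf.get(configKey);
            Class<?> clazz = Class.forName(className);
            return iface.cast(clazz.newInstance());
        }
    }
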
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/IStateStorage.java b/storm-client/src/jvm/org/apache/storm/cluster/IStateStorage.java
index b673932..da16ea3 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/IStateStorage.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/IStateStorage.java
@@ -19,14 +19,16 @@ import org.apache.storm.shade.org.apache.curator.framework.state.ConnectionState
 import org.apache.storm.shade.org.apache.zookeeper.data.ACL;
 
 /**
- * StateStorage provides the API for the pluggable state store used by the Storm daemons. Data is stored in path/value format, and the store
- * supports listing sub-paths at a given path. All data should be available across all nodes with eventual consistency.
+ * StateStorage provides the API for the pluggable state store used by the Storm daemons. Data is stored in path/value
+ * format, and the store supports listing sub-paths at a given path. All data should be available across all nodes with
+ * eventual consistency.
  *
- * IMPORTANT NOTE: Heartbeats have different api calls used to interact with them. The root path (/) may or may not be the same as the root
- * path for the other api calls.
+ * <p>IMPORTANT NOTE: Heartbeats have different api calls used to interact with them. The root path (/) may or may not
+ * be the same as the root path for the other api calls.
  *
- * For example, performing these two calls: set_data("/path", data, acls); void set_worker_hb("/path", heartbeat, acls); may or may not
- * cause a collision in "/path". Never use the same paths with the *_hb* methods as you do with the others.
+ * <p>For example, performing these two calls: set_data("/path", data, acls); void set_worker_hb("/path", heartbeat,
+ * acls); may or may not cause a collision in "/path". Never use the same paths with the *_hb* methods as you do with
+ * the others.
  */
 public interface IStateStorage extends Closeable {
 
@@ -200,7 +202,7 @@ public interface IStateStorage extends Closeable {
     void sync_path(String path);
 
     /**
-     * Allows us to delete the znodes within /storm/blobstore/key_name whose znodes start with the corresponding nimbusHostPortInfo
+     * Allows us to delete the znodes within /storm/blobstore/key_name whose znodes start with the corresponding nimbusHostPortInfo.
      *
      * @param path               /storm/blobstore/key_name
      * @param nimbusHostPortInfo Contains the host port information of a nimbus node.
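
The class javadoc above warns that the *_hb* calls may live under a different root than the plain path/value calls. A hedged sketch of honoring that contract by keeping the two path families disjoint; the paths and payloads are placeholders:

    import java.util.List;
    import org.apache.storm.cluster.IStateStorage;
    import org.apache.storm.shade.org.apache.zookeeper.data.ACL;

    public final class StatePathsSketch {
        // Keep plain-data paths and heartbeat paths disjoint, per the IStateStorage contract.
        static void write(IStateStorage store, List<ACL> acls, byte[] data, byte[] hb) {
            store.set_data("/assignments/topo-1", data, acls);   // plain path/value namespace
            store.set_worker_hb("/heartbeats/topo-1", hb, acls); // heartbeat namespace
        }
    }
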
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/IStormClusterState.java b/storm-client/src/jvm/org/apache/storm/cluster/IStormClusterState.java
index c586dd1..94f0c86 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/IStormClusterState.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/IStormClusterState.java
@@ -142,6 +142,7 @@ public interface IStormClusterState {
     List<String> errorTopologies();
 
     /**
+     * Get backpressure topologies.
      * @deprecated: In Storm 2.0. Retained for enabling transition from 1.x. Will be removed soon.
      */
     @Deprecated
@@ -149,6 +150,7 @@ public interface IStormClusterState {
 
     /**
      * Get leader info from state store, which was written when a master gains leadership.
+     *
     * <p>Caution: it cannot be used for fencing and is only for informational purposes, because we use ZK as our
     * backend now, which could have outdated info about nodes.
      *
@@ -168,24 +170,28 @@ public interface IStormClusterState {
     void supervisorHeartbeat(String supervisorId, SupervisorInfo info);
 
     /**
+     * Get topology backpressure.
      * @deprecated: In Storm 2.0. Retained for enabling transition from 1.x. Will be removed soon.
      */
     @Deprecated
     boolean topologyBackpressure(String stormId, long timeoutMs, Runnable callback);
 
     /**
+     * Setup backpressure.
      * @deprecated: In Storm 2.0. Retained for enabling transition from 1.x. Will be removed soon.
      */
     @Deprecated
     void setupBackpressure(String stormId, Map<String, Object> topoConf);
 
     /**
+     * Remove backpressure.
      * @deprecated: In Storm 2.0. Retained for enabling transition from 1.x. Will be removed soon.
      */
     @Deprecated
     void removeBackpressure(String stormId);
 
     /**
+     * Remove worker backpressure.
      * @deprecated: In Storm 2.0. Retained for enabling transition from 1.x. Will be removed soon.
      */
     @Deprecated
@@ -291,6 +297,7 @@ public interface IStormClusterState {
     }
 
     /**
+     * Get all supervisor info.
      * @param callback be alerted if the list of supervisors change
      * @return All of the supervisors with the ID as the key
      */
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/PaceMakerStateStorage.java b/storm-client/src/jvm/org/apache/storm/cluster/PaceMakerStateStorage.java
index 277a150..401771a 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/PaceMakerStateStorage.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/PaceMakerStateStorage.java
@@ -149,8 +149,8 @@ public class PaceMakerStateStorage implements IStateStorage {
         while (true) {
             try {
                 byte[] ret = null;
-                int latest_time_secs = 0;
-                boolean got_response = false;
+                int latestTimeSecs = 0;
+                boolean gotResponse = false;
 
                 HBMessage message = new HBMessage(HBServerMessageType.GET_PULSE, HBMessageData.path(path));
                 List<HBMessage> responses = pacemakerClientPool.sendAll(message);
@@ -160,18 +160,18 @@ public class PaceMakerStateStorage implements IStateStorage {
                         continue;
                     }
                     // We got at least one GET_PULSE_RESPONSE message.
-                    got_response = true;
+                    gotResponse = true;
                     byte[] details = response.get_data().get_pulse().get_details();
                     if (details == null) {
                         continue;
                     }
                     ClusterWorkerHeartbeat cwh = Utils.deserialize(details, ClusterWorkerHeartbeat.class);
-                    if (cwh != null && cwh.get_time_secs() > latest_time_secs) {
-                        latest_time_secs = cwh.get_time_secs();
+                    if (cwh != null && cwh.get_time_secs() > latestTimeSecs) {
+                        latestTimeSecs = cwh.get_time_secs();
                         ret = details;
                     }
                 }
-                if (!got_response) {
+                if (!gotResponse) {
                     throw new WrappedHBExecutionException("Failed to get a response.");
                 }
                 return ret;
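
The hunk above fans a GET_PULSE out to all pacemakers and keeps the heartbeat with the highest timestamp. The same latest-wins reduction in isolation, with the Storm types replaced by a placeholder Pulse class:

    import java.util.List;

    public final class LatestWinsSketch {
        /** Placeholder for a pacemaker response carrying a timestamped heartbeat. */
        static final class Pulse {
            final int timeSecs;
            final byte[] details;

            Pulse(int timeSecs, byte[] details) {
                this.timeSecs = timeSecs;
                this.details = details;
            }
        }

        /** Returns the payload of the newest pulse, or null if no response had details. */
        static byte[] newest(List<Pulse> responses) {
            byte[] ret = null;
            int latestTimeSecs = 0;
            for (Pulse p : responses) {
                if (p.details != null && p.timeSecs > latestTimeSecs) {
                    latestTimeSecs = p.timeSecs;
                    ret = p.details;
                }
            }
            return ret;
        }
    }
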
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/PaceMakerStateStorageFactory.java b/storm-client/src/jvm/org/apache/storm/cluster/PaceMakerStateStorageFactory.java
index 5e0cdd7..596aefa 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/PaceMakerStateStorageFactory.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/PaceMakerStateStorageFactory.java
@@ -24,10 +24,10 @@ import org.apache.storm.utils.Utils;
 
 public class PaceMakerStateStorageFactory implements StateStorageFactory {
     @Override
-    public IStateStorage mkStore(Map<String, Object> config, Map<String, Object> auth_conf, ClusterStateContext context) {
+    public IStateStorage mkStore(Map<String, Object> config, Map<String, Object> authConf, ClusterStateContext context) {
         try {
             ZKStateStorageFactory zkfact = new ZKStateStorageFactory();
-            IStateStorage zkState = zkfact.mkStore(config, auth_conf, context);
+            IStateStorage zkState = zkfact.mkStore(config, authConf, context);
             return new PaceMakerStateStorage(new PacemakerClientPool(config), zkState);
         } catch (Exception e) {
             throw Utils.wrapInRuntime(e);
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/StateStorageFactory.java b/storm-client/src/jvm/org/apache/storm/cluster/StateStorageFactory.java
index ee530a0..fb321f7 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/StateStorageFactory.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/StateStorageFactory.java
@@ -16,5 +16,5 @@ import java.util.Map;
 
 public interface StateStorageFactory {
 
-    IStateStorage mkStore(Map<String, Object> config, Map<String, Object> auth_conf, ClusterStateContext context);
+    IStateStorage mkStore(Map<String, Object> config, Map<String, Object> authConf, ClusterStateContext context);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/StormClusterStateImpl.java b/storm-client/src/jvm/org/apache/storm/cluster/StormClusterStateImpl.java
index f330278..80d5fb0 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/StormClusterStateImpl.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/StormClusterStateImpl.java
@@ -73,10 +73,10 @@ public class StormClusterStateImpl implements IStormClusterState {
     private ConcurrentHashMap<String, Runnable> credentialsCallback;
     private ConcurrentHashMap<String, Runnable> logConfigCallback;
 
-    public StormClusterStateImpl(IStateStorage StateStorage, ILocalAssignmentsBackend assignmentsassignmentsBackend,
+    public StormClusterStateImpl(IStateStorage stateStorage, ILocalAssignmentsBackend assignmentsassignmentsBackend,
                                  ClusterStateContext context, boolean shouldCloseStateStorageOnDisconnect) throws Exception {
 
-        this.stateStorage = StateStorage;
+        this.stateStorage = stateStorage;
         this.shouldCloseStateStorageOnDisconnect = shouldCloseStateStorageOnDisconnect;
         this.defaultAcls = context.getDefaultZkAcls();
         this.context = context;
@@ -591,10 +591,7 @@ public class StormClusterStateImpl implements IStormClusterState {
     }
 
     /**
-     * To update this function due to APersistentMap/APersistentSet is clojure's structure
-     *
-     * @param stormId
-     * @param newElems
+     * To update this function because APersistentMap/APersistentSet are Clojure structures.
      */
     @Override
     public void updateStorm(String stormId, StormBase newElems) {
@@ -617,7 +614,7 @@ public class StormClusterStateImpl implements IStormClusterState {
             }
         }
 
-        Map<String, DebugOptions> ComponentDebug = new HashMap<>();
+        Map<String, DebugOptions> componentDebug = new HashMap<>();
         Map<String, DebugOptions> oldComponentDebug = stormBase.get_component_debug();
 
         Map<String, DebugOptions> newComponentDebug = newElems.get_component_debug();
@@ -639,10 +636,10 @@ public class StormClusterStateImpl implements IStormClusterState {
             DebugOptions debugOptions = new DebugOptions();
             debugOptions.set_enable(enable);
             debugOptions.set_samplingpct(samplingpct);
-            ComponentDebug.put(key, debugOptions);
+            componentDebug.put(key, debugOptions);
         }
-        if (ComponentDebug.size() > 0) {
-            newElems.set_component_debug(ComponentDebug);
+        if (componentDebug.size() > 0) {
+            newElems.set_component_debug(componentDebug);
         }
 
         if (StringUtils.isBlank(newElems.get_name())) {
@@ -746,13 +743,13 @@ public class StormClusterStateImpl implements IStormClusterState {
     @Override
     public void reportError(String stormId, String componentId, String node, Long port, Throwable error) {
         String path = ClusterUtils.errorPath(stormId, componentId);
-        String lastErrorPath = ClusterUtils.lastErrorPath(stormId, componentId);
         ErrorInfo errorInfo = new ErrorInfo(ClusterUtils.stringifyError(error), Time.currentTimeSecs());
         errorInfo.set_host(node);
         errorInfo.set_port(port.intValue());
         byte[] serData = Utils.serialize(errorInfo);
         stateStorage.mkdirs(path, defaultAcls);
         stateStorage.create_sequential(path + ClusterUtils.ZK_SEPERATOR + "e", serData, defaultAcls);
+        String lastErrorPath = ClusterUtils.lastErrorPath(stormId, componentId);
         stateStorage.set_data(lastErrorPath, serData, defaultAcls);
         List<String> childrens = stateStorage.get_children(path, false);
 
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/ZKStateStorage.java b/storm-client/src/jvm/org/apache/storm/cluster/ZKStateStorage.java
index 2477f34..64ecf76 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/ZKStateStorage.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/ZKStateStorage.java
@@ -38,6 +38,7 @@ import org.apache.storm.zookeeper.ClientZookeeper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class ZKStateStorage implements IStateStorage {
 
     private static Logger LOG = LoggerFactory.getLogger(ZKStateStorage.class);
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/ZKStateStorageFactory.java b/storm-client/src/jvm/org/apache/storm/cluster/ZKStateStorageFactory.java
index 11feedc..ff22019 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/ZKStateStorageFactory.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/ZKStateStorageFactory.java
@@ -21,12 +21,13 @@ package org.apache.storm.cluster;
 import java.util.Map;
 import org.apache.storm.utils.Utils;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class ZKStateStorageFactory implements StateStorageFactory {
 
     @Override
-    public IStateStorage mkStore(Map<String, Object> config, Map<String, Object> auth_conf, ClusterStateContext context) {
+    public IStateStorage mkStore(Map<String, Object> config, Map<String, Object> authConf, ClusterStateContext context) {
         try {
-            return new ZKStateStorage(config, auth_conf, context);
+            return new ZKStateStorage(config, authConf, context);
         } catch (Exception e) {
             throw Utils.wrapInRuntime(e);
         }
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCenter.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCenter.java
index 59ff7e6..67c0482 100644
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCenter.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCenter.java
@@ -37,7 +37,7 @@ public class CgroupCenter implements CgroupOperation {
 
     }
 
-    public synchronized static CgroupCenter getInstance() {
+    public static synchronized CgroupCenter getInstance() {
         if (CgroupUtils.enabled()) {
             instance = new CgroupCenter();
             return instance;
@@ -82,10 +82,10 @@ public class CgroupCenter implements CgroupOperation {
                 if (type == null) {
                     continue;
                 }
-                int hierarchyID = Integer.valueOf(split[1]);
+                int hierarchyId = Integer.valueOf(split[1]);
                 int cgroupNum = Integer.valueOf(split[2]);
                 boolean enable = Integer.valueOf(split[3]).intValue() == 1 ? true : false;
-                subSystems.add(new SubSystem(type, hierarchyID, cgroupNum, enable));
+                subSystems.add(new SubSystem(type, hierarchyId, cgroupNum, enable));
             }
             return subSystems;
         } catch (Exception e) {
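
The getSubSystems hunk above parses /proc/cgroups, whose whitespace-separated columns are subsys_name, hierarchy, num_cgroups and enabled. A standalone sketch of that parse, runnable only on a Linux host with cgroups:

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;

    public final class ProcCgroupsSketch {
        public static void main(String[] args) throws IOException {
            try (BufferedReader reader = new BufferedReader(new FileReader("/proc/cgroups"))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    if (line.startsWith("#")) {
                        continue; // skip the header row
                    }
                    String[] split = line.split("\\s+");
                    String name = split[0];
                    int hierarchyId = Integer.parseInt(split[1]);
                    int cgroupNum = Integer.parseInt(split[2]);
                    boolean enabled = Integer.parseInt(split[3]) == 1;
                    System.out.printf("%s hierarchy=%d cgroups=%d enabled=%b%n",
                            name, hierarchyId, cgroupNum, enabled);
                }
            }
        }
    }
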
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommon.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommon.java
index 082989d..07f1abe 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommon.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommon.java
@@ -45,9 +45,6 @@ public class CgroupCommon implements CgroupCommonOperation {
         this.isRoot = false;
     }
 
-    /**
-     * rootCgroup
-     */
     public CgroupCommon(Hierarchy hierarchy, String dir) {
         this.name = "";
         this.hierarchy = hierarchy;
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommonOperation.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommonOperation.java
index c73d1f1..63802a0 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommonOperation.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommonOperation.java
@@ -18,61 +18,61 @@ import java.util.Set;
 public interface CgroupCommonOperation {
 
     /**
-     * add task to cgroup
+     * add task to cgroup.
      *
      * @param taskid task id of task to add
      */
     public void addTask(int taskid) throws IOException;
 
     /**
-     * Get a list of task ids running in CGroup
+     * Get a list of task ids running in CGroup.
      */
     public Set<Integer> getTasks() throws IOException;
 
     /**
-     * add a process to cgroup
+     * add a process to cgroup.
      *
      * @param pid the PID of the process to add
      */
     public void addProcs(int pid) throws IOException;
 
     /**
-     * get the PIDs of processes running in cgroup
+     * get the PIDs of processes running in cgroup.
      */
     public Set<Long> getPids() throws IOException;
 
     /**
-     * to get the notify_on_release config
+     * to get the notify_on_release config.
      */
     public boolean getNotifyOnRelease() throws IOException;
 
     /**
-     * to set notify_on_release config in cgroup
+     * to set notify_on_release config in cgroup.
      */
     public void setNotifyOnRelease(boolean flag) throws IOException;
 
     /**
-     * get the command for the relase agent to execute
+     * get the command for the release agent to execute.
      */
     public String getReleaseAgent() throws IOException;
 
     /**
-     * set a command for the release agent to execute
+     * set a command for the release agent to execute.
      */
     public void setReleaseAgent(String command) throws IOException;
 
     /**
-     * get the cgroup.clone_children config
+     * get the cgroup.clone_children config.
      */
     public boolean getCgroupCloneChildren() throws IOException;
 
     /**
-     * Set the cgroup.clone_children config
+     * Set the cgroup.clone_children config.
      */
     public void setCgroupCloneChildren(boolean flag) throws IOException;
 
     /**
-     * set event control config
+     * set event control config.
      */
     public void setEventControl(String eventFd, String controlFd, String... args) throws IOException;
 }
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupOperation.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupOperation.java
index 7ec2b47..6387fe3 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupOperation.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupOperation.java
@@ -23,52 +23,52 @@ import java.util.Set;
 public interface CgroupOperation {
 
     /**
-     * Get a list of hierarchies
+     * Get a list of hierarchies.
      */
     public List<Hierarchy> getHierarchies();
 
     /**
-     * get a list of available subsystems
+     * get a list of available subsystems.
      */
     public Set<SubSystem> getSubSystems();
 
     /**
-     * Check if a subsystem is enabled
+     * Check if a subsystem is enabled.
      */
     public boolean isSubSystemEnabled(SubSystemType subsystem);
 
     /**
-     * get the first hierarchy that has a certain subsystem isMounted
+     * get the first hierarchy that has a certain subsystem isMounted.
      */
     public Hierarchy getHierarchyWithSubSystem(SubSystemType subsystem);
 
     /**
-     * get the first hierarchy that has a certain list of subsystems isMounted
+     * get the first hierarchy that has a certain list of subsystems isMounted.
      */
     public Hierarchy getHierarchyWithSubSystems(List<SubSystemType> subSystems);
 
     /**
-     * check if a hiearchy is mounted
+     * check if a hierarchy is mounted.
      */
     public boolean isMounted(Hierarchy hierarchy);
 
     /**
-     * mount a hierarchy
+     * mount a hierarchy.
      */
     public void mount(Hierarchy hierarchy) throws IOException;
 
     /**
-     * umount a heirarchy
+     * umount a hierarchy.
      */
     public void umount(Hierarchy hierarchy) throws IOException;
 
     /**
-     * create a cgroup
+     * create a cgroup.
      */
     public void createCgroup(CgroupCommon cgroup) throws SecurityException;
 
     /**
-     * delete a cgroup
+     * delete a cgroup.
      */
     public void deleteCgroup(CgroupCommon cgroup) throws IOException;
 }
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupUtils.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupUtils.java
index 2990743..d55361e 100644
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupUtils.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupUtils.java
@@ -48,7 +48,7 @@ public class CgroupUtils {
     }
 
     /**
-     * Get a set of SubSystemType objects from a comma delimited list of subsystem names
+     * Get a set of SubSystemType objects from a comma delimited list of subsystem names.
      */
     public static Set<SubSystemType> getSubSystemsFromString(String str) {
         Set<SubSystemType> result = new HashSet<SubSystemType>();
@@ -64,7 +64,7 @@ public class CgroupUtils {
     }
 
     /**
-     * Get a string that is a comma delimited list of subsystems
+     * Get a string that is a comma delimited list of subsystems.
      */
     public static String subSystemsToString(Set<SubSystemType> subSystems) {
         StringBuilder sb = new StringBuilder();
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/Device.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/Device.java
index 9dfc15a..a949e87 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/Device.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/Device.java
@@ -13,7 +13,7 @@
 package org.apache.storm.container.cgroup;
 
 /**
- * a class that represents a device in linux
+ * a class that represents a device in linux.
  */
 public class Device {
 
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/Hierarchy.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/Hierarchy.java
index ea79918..7ef5296 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/Hierarchy.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/Hierarchy.java
@@ -15,7 +15,7 @@ package org.apache.storm.container.cgroup;
 import java.util.Set;
 
 /**
- * A class that describes a cgroup hierarchy
+ * A class that describes a cgroup hierarchy.
  */
 public class Hierarchy {
 
@@ -38,14 +38,14 @@ public class Hierarchy {
     }
 
     /**
-     * get subsystems
+     * get subsystems.
      */
     public Set<SubSystemType> getSubSystems() {
         return subSystems;
     }
 
     /**
-     * get all subsystems in hierarchy as a comma delimited list
+     * get all subsystems in hierarchy as a comma delimited list.
      */
     public String getType() {
         return type;
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/SubSystem.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/SubSystem.java
index 8a02584..f5aa32f 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/SubSystem.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/SubSystem.java
@@ -13,21 +13,22 @@
 package org.apache.storm.container.cgroup;
 
 /**
- * a class that implements operations that can be performed on a cgroup subsystem
+ * a class that implements operations that can be performed on a cgroup subsystem.
  */
 public class SubSystem {
 
     private SubSystemType type;
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private int hierarchyID;
 
     private int cgroupsNum;
 
     private boolean enable;
 
-    public SubSystem(SubSystemType type, int hierarchyID, int cgroupNum, boolean enable) {
+    public SubSystem(SubSystemType type, int hierarchyId, int cgroupNum, boolean enable) {
         this.type = type;
-        this.hierarchyID = hierarchyID;
+        this.hierarchyID = hierarchyId;
         this.cgroupsNum = cgroupNum;
         this.enable = enable;
     }
@@ -40,10 +41,12 @@ public class SubSystem {
         this.type = type;
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public int getHierarchyID() {
         return hierarchyID;
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public void setHierarchyID(int hierarchyID) {
         this.hierarchyID = hierarchyID;
     }
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/SubSystemType.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/SubSystemType.java
index ff8ab28..f2ea9dd 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/SubSystemType.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/SubSystemType.java
@@ -13,7 +13,7 @@
 package org.apache.storm.container.cgroup;
 
 /**
- * A enum class to described the subsystems that can be used
+ * An enum class describing the subsystems that can be used.
  */
 public enum SubSystemType {
 
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/SystemOperation.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/SystemOperation.java
index 17a1a71..a03fb52 100644
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/SystemOperation.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/SystemOperation.java
@@ -19,7 +19,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * A class that implements system operations for using cgroups
+ * A class that implements system operations for using cgroups.
  */
 public class SystemOperation {
 
@@ -33,13 +33,13 @@ public class SystemOperation {
     public static void mount(String name, String target, String type, String options) throws IOException {
         StringBuilder sb = new StringBuilder();
         sb.append("mount -t ")
-          .append(type)
-          .append(" -o ")
-          .append(options)
-          .append(" ")
-          .append(name)
-          .append(" ")
-          .append(target);
+                .append(type)
+                .append(" -o ")
+                .append(options)
+                .append(" ")
+                .append(name)
+                .append(" ")
+                .append(target);
         SystemOperation.exec(sb.toString());
     }
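
The reindented builder above concatenates an ordinary mount(8) invocation. A minimal sketch of the string it ends up executing, with hypothetical argument values and no actual mount performed:

    public class MountCommandDemo {
        public static void main(String[] args) {
            String name = "cgroup";        // hypothetical source name
            String target = "/cgroup/cpu"; // hypothetical mount point
            String type = "cgroup";
            String options = "cpu";
            // Mirrors the StringBuilder chain in SystemOperation.mount.
            String cmd = "mount -t " + type + " -o " + options + " " + name + " " + target;
            System.out.println(cmd); // mount -t cgroup -o cpu cgroup /cgroup/cpu
        }
    }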
 
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/core/BlkioCore.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/core/BlkioCore.java
index 75fe134..7bcf570 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/core/BlkioCore.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/core/BlkioCore.java
@@ -95,26 +95,32 @@ public class BlkioCore implements CgroupCore {
         return parseConfig(BLKIO_THROTTLE_WRITE_BPS_DEVICE);
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public void setReadIOps(Device device, long iops) throws IOException {
         CgroupUtils.writeFileByLine(CgroupUtils.getDir(this.dir, BLKIO_THROTTLE_READ_IOPS_DEVICE), makeContext(device, iops));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Long> getReadIOps() throws IOException {
         return parseConfig(BLKIO_THROTTLE_READ_IOPS_DEVICE);
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public void setWriteIOps(Device device, long iops) throws IOException {
         CgroupUtils.writeFileByLine(CgroupUtils.getDir(this.dir, BLKIO_THROTTLE_WRITE_IOPS_DEVICE), makeContext(device, iops));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Long> getWriteIOps() throws IOException {
         return parseConfig(BLKIO_THROTTLE_WRITE_IOPS_DEVICE);
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Map<RecordType, Long>> getThrottleIOServiced() throws IOException {
         return this.analyseRecord(CgroupUtils.readFileByLine(CgroupUtils.getDir(this.dir, BLKIO_THROTTLE_IO_SERVICED)));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Map<RecordType, Long>> getThrottleIOServiceByte() throws IOException {
         return this.analyseRecord(CgroupUtils.readFileByLine(CgroupUtils.getDir(this.dir, BLKIO_THROTTLE_IO_SERVICE_BYTES)));
     }
@@ -127,26 +133,32 @@ public class BlkioCore implements CgroupCore {
         return parseConfig(BLKIO_SECTORS);
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Map<RecordType, Long>> getIOServiced() throws IOException {
         return this.analyseRecord(CgroupUtils.readFileByLine(CgroupUtils.getDir(this.dir, BLKIO_IO_SERVICED)));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Map<RecordType, Long>> getIOServiceBytes() throws IOException {
         return this.analyseRecord(CgroupUtils.readFileByLine(CgroupUtils.getDir(this.dir, BLKIO_IO_SERVICE_BYTES)));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Map<RecordType, Long>> getIOServiceTime() throws IOException {
         return this.analyseRecord(CgroupUtils.readFileByLine(CgroupUtils.getDir(this.dir, BLKIO_IO_SERVICE_TIME)));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Map<RecordType, Long>> getIOWaitTime() throws IOException {
         return this.analyseRecord(CgroupUtils.readFileByLine(CgroupUtils.getDir(this.dir, BLKIO_IO_WAIT_TIME)));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Map<RecordType, Long>> getIOMerged() throws IOException {
         return this.analyseRecord(CgroupUtils.readFileByLine(CgroupUtils.getDir(this.dir, BLKIO_IO_MERGED)));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public Map<Device, Map<RecordType, Long>> getIOQueued() throws IOException {
         return this.analyseRecord(CgroupUtils.readFileByLine(CgroupUtils.getDir(this.dir, BLKIO_IO_QUEUED)));
     }
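
The renamed IOps accessors above read and write cgroup-v1 blkio throttle files, where each line pairs a device number with a limit. A minimal sketch of that line format (plain string handling; no cgroup mount is touched):

    public class BlkioThrottleLineDemo {
        public static void main(String[] args) {
            // A blkio.throttle.read_iops_device entry has the form "major:minor limit".
            String line = "8:0 1000";
            String[] deviceAndLimit = line.split(" ");
            String[] majorMinor = deviceAndLimit[0].split(":");
            System.out.printf("major=%s minor=%s iops=%s%n",
                    majorMinor[0], majorMinor[1], deviceAndLimit[1]);
        }
    }
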
diff --git a/storm-client/src/jvm/org/apache/storm/coordination/BatchBoltExecutor.java b/storm-client/src/jvm/org/apache/storm/coordination/BatchBoltExecutor.java
index 9f733b1..4b7fcf4 100644
--- a/storm-client/src/jvm/org/apache/storm/coordination/BatchBoltExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/coordination/BatchBoltExecutor.java
@@ -29,22 +29,22 @@ import org.slf4j.LoggerFactory;
 public class BatchBoltExecutor implements IRichBolt, FinishedCallback, TimeoutCallback {
     public static final Logger LOG = LoggerFactory.getLogger(BatchBoltExecutor.class);
 
-    byte[] _boltSer;
-    Map<Object, IBatchBolt> _openTransactions;
-    Map _conf;
-    TopologyContext _context;
-    BatchOutputCollectorImpl _collector;
+    private byte[] boltSer;
+    private Map<Object, IBatchBolt> openTransactions;
+    private Map conf;
+    private TopologyContext context;
+    private BatchOutputCollectorImpl collector;
 
     public BatchBoltExecutor(IBatchBolt bolt) {
-        _boltSer = Utils.javaSerialize(bolt);
+        boltSer = Utils.javaSerialize(bolt);
     }
 
     @Override
     public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
-        _conf = conf;
-        _context = context;
-        _collector = new BatchOutputCollectorImpl(collector);
-        _openTransactions = new HashMap<>();
+        this.conf = conf;
+        this.context = context;
+        this.collector = new BatchOutputCollectorImpl(collector);
+        openTransactions = new HashMap<>();
     }
 
     @Override
@@ -53,10 +53,10 @@ public class BatchBoltExecutor implements IRichBolt, FinishedCallback, TimeoutCa
         IBatchBolt bolt = getBatchBolt(id);
         try {
             bolt.execute(input);
-            _collector.ack(input);
+            collector.ack(input);
         } catch (FailedException e) {
             LOG.error("Failed to process tuple in batch", e);
-            _collector.fail(input);
+            collector.fail(input);
         }
     }
 
@@ -67,13 +67,13 @@ public class BatchBoltExecutor implements IRichBolt, FinishedCallback, TimeoutCa
     @Override
     public void finishedId(Object id) {
         IBatchBolt bolt = getBatchBolt(id);
-        _openTransactions.remove(id);
+        openTransactions.remove(id);
         bolt.finishBatch();
     }
 
     @Override
     public void timeoutId(Object attempt) {
-        _openTransactions.remove(attempt);
+        openTransactions.remove(attempt);
     }
 
 
@@ -88,16 +88,16 @@ public class BatchBoltExecutor implements IRichBolt, FinishedCallback, TimeoutCa
     }
 
     private IBatchBolt getBatchBolt(Object id) {
-        IBatchBolt bolt = _openTransactions.get(id);
+        IBatchBolt bolt = openTransactions.get(id);
         if (bolt == null) {
             bolt = newTransactionalBolt();
-            bolt.prepare(_conf, _context, _collector, id);
-            _openTransactions.put(id, bolt);
+            bolt.prepare(conf, context, collector, id);
+            openTransactions.put(id, bolt);
         }
         return bolt;
     }
 
     private IBatchBolt newTransactionalBolt() {
-        return Utils.javaDeserialize(_boltSer, IBatchBolt.class);
+        return Utils.javaDeserialize(boltSer, IBatchBolt.class);
     }
 }
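
getBatchBolt above deserializes a fresh bolt the first time a batch id is seen and caches it until finishedId or timeoutId drops it. A minimal, Storm-free sketch of that lazy per-key instantiation pattern:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Supplier;

    public class PerBatchCacheDemo {
        private final Map<Object, String> open = new HashMap<>();
        private final Supplier<String> factory;

        PerBatchCacheDemo(Supplier<String> factory) {
            this.factory = factory;
        }

        // Create on first sight of a batch id, reuse until the batch ends.
        String forBatch(Object id) {
            return open.computeIfAbsent(id, k -> factory.get());
        }

        // Drop the entry once the batch finishes or times out.
        void finish(Object id) {
            open.remove(id);
        }

        public static void main(String[] args) {
            AtomicInteger n = new AtomicInteger();
            PerBatchCacheDemo cache = new PerBatchCacheDemo(() -> "bolt-" + n.incrementAndGet());
            System.out.println(cache.forBatch("tx1")); // bolt-1
            System.out.println(cache.forBatch("tx1")); // bolt-1 (cached)
            cache.finish("tx1");
            System.out.println(cache.forBatch("tx1")); // bolt-2 (recreated)
        }
    }
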
diff --git a/storm-client/src/jvm/org/apache/storm/coordination/BatchOutputCollector.java b/storm-client/src/jvm/org/apache/storm/coordination/BatchOutputCollector.java
index fb57e4a..2baff31 100644
--- a/storm-client/src/jvm/org/apache/storm/coordination/BatchOutputCollector.java
+++ b/storm-client/src/jvm/org/apache/storm/coordination/BatchOutputCollector.java
@@ -43,7 +43,7 @@ public abstract class BatchOutputCollector {
     public abstract void emitDirect(int taskId, String streamId, List<Object> tuple);
 
     /**
-     * Flush any buffered tuples (when batching is enabled)
+     * Flush any buffered tuples (when batching is enabled).
      */
     public abstract void flush();
 
diff --git a/storm-client/src/jvm/org/apache/storm/coordination/BatchOutputCollectorImpl.java b/storm-client/src/jvm/org/apache/storm/coordination/BatchOutputCollectorImpl.java
index b4b00c4..bea8a45 100644
--- a/storm-client/src/jvm/org/apache/storm/coordination/BatchOutputCollectorImpl.java
+++ b/storm-client/src/jvm/org/apache/storm/coordination/BatchOutputCollectorImpl.java
@@ -17,37 +17,37 @@ import org.apache.storm.task.OutputCollector;
 import org.apache.storm.tuple.Tuple;
 
 public class BatchOutputCollectorImpl extends BatchOutputCollector {
-    OutputCollector _collector;
+    private OutputCollector collector;
 
     public BatchOutputCollectorImpl(OutputCollector collector) {
-        _collector = collector;
+        this.collector = collector;
     }
 
     @Override
     public List<Integer> emit(String streamId, List<Object> tuple) {
-        return _collector.emit(streamId, tuple);
+        return collector.emit(streamId, tuple);
     }
 
     @Override
     public void emitDirect(int taskId, String streamId, List<Object> tuple) {
-        _collector.emitDirect(taskId, streamId, tuple);
+        collector.emitDirect(taskId, streamId, tuple);
     }
 
     @Override
     public void flush() {
-        _collector.flush();
+        collector.flush();
     }
 
     @Override
     public void reportError(Throwable error) {
-        _collector.reportError(error);
+        collector.reportError(error);
     }
 
     public void ack(Tuple tup) {
-        _collector.ack(tup);
+        collector.ack(tup);
     }
 
     public void fail(Tuple tup) {
-        _collector.fail(tup);
+        collector.fail(tup);
     }
 }
diff --git a/storm-client/src/jvm/org/apache/storm/coordination/CoordinatedBolt.java b/storm-client/src/jvm/org/apache/storm/coordination/CoordinatedBolt.java
index 181e4ec..18a14db 100644
--- a/storm-client/src/jvm/org/apache/storm/coordination/CoordinatedBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/coordination/CoordinatedBolt.java
@@ -43,13 +43,13 @@ import org.slf4j.LoggerFactory;
  */
 public class CoordinatedBolt implements IRichBolt {
     public static final Logger LOG = LoggerFactory.getLogger(CoordinatedBolt.class);
-    private Map<String, SourceArgs> _sourceArgs;
-    private IdStreamSpec _idStreamSpec;
-    private IRichBolt _delegate;
-    private Integer _numSourceReports;
-    private List<Integer> _countOutTasks = new ArrayList<>();
-    private OutputCollector _collector;
-    private TimeCacheMap<Object, TrackingInfo> _tracked;
+    private Map<String, SourceArgs> sourceArgs;
+    private IdStreamSpec idStreamSpec;
+    private IRichBolt delegate;
+    private Integer numSourceReports;
+    private List<Integer> countOutTasks = new ArrayList<>();
+    private OutputCollector collector;
+    private TimeCacheMap<Object, TrackingInfo> tracked;
 
     public CoordinatedBolt(IRichBolt delegate) {
         this(delegate, null, null);
@@ -60,12 +60,12 @@ public class CoordinatedBolt implements IRichBolt {
     }
 
     public CoordinatedBolt(IRichBolt delegate, Map<String, SourceArgs> sourceArgs, IdStreamSpec idStreamSpec) {
-        _sourceArgs = sourceArgs;
-        if (_sourceArgs == null) {
-            _sourceArgs = new HashMap<>();
+        this.sourceArgs = sourceArgs;
+        if (this.sourceArgs == null) {
+            this.sourceArgs = new HashMap<>();
         }
-        _delegate = delegate;
-        _idStreamSpec = idStreamSpec;
+        this.delegate = delegate;
+        this.idStreamSpec = idStreamSpec;
     }
 
     private static Map<String, SourceArgs> singleSourceArgs(String sourceComponent, SourceArgs sourceArgs) {
@@ -77,27 +77,27 @@ public class CoordinatedBolt implements IRichBolt {
     @Override
     public void prepare(Map<String, Object> config, TopologyContext context, OutputCollector collector) {
         TimeCacheMap.ExpiredCallback<Object, TrackingInfo> callback = null;
-        if (_delegate instanceof TimeoutCallback) {
+        if (delegate instanceof TimeoutCallback) {
             callback = new TimeoutItems();
         }
-        _tracked = new TimeCacheMap<>(context.maxTopologyMessageTimeout(), callback);
-        _collector = collector;
-        _delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
+        tracked = new TimeCacheMap<>(context.maxTopologyMessageTimeout(), callback);
+        this.collector = collector;
+        delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
         for (String component : Utils.get(context.getThisTargets(),
                                           Constants.COORDINATED_STREAM_ID,
                                           new HashMap<String, Grouping>())
                                      .keySet()) {
             for (Integer task : context.getComponentTasks(component)) {
-                _countOutTasks.add(task);
+                countOutTasks.add(task);
             }
         }
-        if (!_sourceArgs.isEmpty()) {
-            _numSourceReports = 0;
-            for (Entry<String, SourceArgs> entry : _sourceArgs.entrySet()) {
+        if (!sourceArgs.isEmpty()) {
+            numSourceReports = 0;
+            for (Entry<String, SourceArgs> entry : sourceArgs.entrySet()) {
                 if (entry.getValue().singleCount) {
-                    _numSourceReports += 1;
+                    numSourceReports += 1;
                 } else {
-                    _numSourceReports += context.getComponentTasks(entry.getKey()).size();
+                    numSourceReports += context.getComponentTasks(entry.getKey()).size();
                 }
             }
         }
@@ -107,61 +107,59 @@ public class CoordinatedBolt implements IRichBolt {
         Object id = tup.getValue(0);
         boolean failed = false;
 
-        synchronized (_tracked) {
-            TrackingInfo track = _tracked.get(id);
+        synchronized (tracked) {
+            TrackingInfo track = tracked.get(id);
             try {
                 if (track != null) {
                     boolean delayed = false;
-                    if (_idStreamSpec == null && type == TupleType.COORD || _idStreamSpec != null && type == TupleType.ID) {
+                    if (idStreamSpec == null && type == TupleType.COORD || idStreamSpec != null && type == TupleType.ID) {
                         track.ackTuples.add(tup);
                         delayed = true;
                     }
                     if (track.failed) {
                         failed = true;
                         for (Tuple t : track.ackTuples) {
-                            _collector.fail(t);
+                            collector.fail(t);
                         }
-                        _tracked.remove(id);
-                    } else if (track.receivedId
-                               && (_sourceArgs.isEmpty() ||
-                                   track.reportCount == _numSourceReports &&
-                                   track.expectedTupleCount == track.receivedTuples)) {
-                        if (_delegate instanceof FinishedCallback) {
-                            ((FinishedCallback) _delegate).finishedId(id);
+                        tracked.remove(id);
+                    } else if (track.receivedId && (sourceArgs.isEmpty()
+                            || track.reportCount == numSourceReports && track.expectedTupleCount == track.receivedTuples)) {
+                        if (delegate instanceof FinishedCallback) {
+                            ((FinishedCallback) delegate).finishedId(id);
                         }
-                        if (!(_sourceArgs.isEmpty() || type != TupleType.REGULAR)) {
+                        if (!(sourceArgs.isEmpty() || type != TupleType.REGULAR)) {
                             throw new IllegalStateException("Coordination condition met on a non-coordinating tuple. Should be impossible");
                         }
-                        Iterator<Integer> outTasks = _countOutTasks.iterator();
+                        Iterator<Integer> outTasks = countOutTasks.iterator();
                         while (outTasks.hasNext()) {
                             int task = outTasks.next();
                             int numTuples = Utils.get(track.taskEmittedTuples, task, 0);
-                            _collector.emitDirect(task, Constants.COORDINATED_STREAM_ID, tup, new Values(id, numTuples));
+                            collector.emitDirect(task, Constants.COORDINATED_STREAM_ID, tup, new Values(id, numTuples));
                         }
                         for (Tuple t : track.ackTuples) {
-                            _collector.ack(t);
+                            collector.ack(t);
                         }
                         track.finished = true;
-                        _tracked.remove(id);
+                        tracked.remove(id);
                     }
                     if (!delayed && type != TupleType.REGULAR) {
                         if (track.failed) {
-                            _collector.fail(tup);
+                            collector.fail(tup);
                         } else {
-                            _collector.ack(tup);
+                            collector.ack(tup);
                         }
                     }
                 } else {
                     if (type != TupleType.REGULAR) {
-                        _collector.fail(tup);
+                        collector.fail(tup);
                     }
                 }
             } catch (FailedException e) {
                 LOG.error("Failed to finish batch", e);
                 for (Tuple t : track.ackTuples) {
-                    _collector.fail(t);
+                    collector.fail(t);
                 }
-                _tracked.remove(id);
+                tracked.remove(id);
                 failed = true;
             }
         }
@@ -173,58 +171,58 @@ public class CoordinatedBolt implements IRichBolt {
         Object id = tuple.getValue(0);
         TrackingInfo track;
         TupleType type = getTupleType(tuple);
-        synchronized (_tracked) {
-            track = _tracked.get(id);
+        synchronized (tracked) {
+            track = tracked.get(id);
             if (track == null) {
                 track = new TrackingInfo();
-                if (_idStreamSpec == null) {
+                if (idStreamSpec == null) {
                     track.receivedId = true;
                 }
-                _tracked.put(id, track);
+                tracked.put(id, track);
             }
         }
 
         if (type == TupleType.ID) {
-            synchronized (_tracked) {
+            synchronized (tracked) {
                 track.receivedId = true;
             }
             checkFinishId(tuple, type);
         } else if (type == TupleType.COORD) {
             int count = (Integer) tuple.getValue(1);
-            synchronized (_tracked) {
+            synchronized (tracked) {
                 track.reportCount++;
                 track.expectedTupleCount += count;
             }
             checkFinishId(tuple, type);
         } else {
-            synchronized (_tracked) {
-                _delegate.execute(tuple);
+            synchronized (tracked) {
+                delegate.execute(tuple);
             }
         }
     }
 
     @Override
     public void cleanup() {
-        _delegate.cleanup();
-        _tracked.cleanup();
+        delegate.cleanup();
+        tracked.cleanup();
     }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        _delegate.declareOutputFields(declarer);
+        delegate.declareOutputFields(declarer);
         declarer.declareStream(Constants.COORDINATED_STREAM_ID, true, new Fields("id", "count"));
     }
 
     @Override
     public Map<String, Object> getComponentConfiguration() {
-        return _delegate.getComponentConfiguration();
+        return delegate.getComponentConfiguration();
     }
 
     private TupleType getTupleType(Tuple tuple) {
-        if (_idStreamSpec != null
-            && tuple.getSourceGlobalStreamId().equals(_idStreamSpec._id)) {
+        if (idStreamSpec != null
+            && tuple.getSourceGlobalStreamId().equals(idStreamSpec.id)) {
             return TupleType.ID;
-        } else if (!_sourceArgs.isEmpty()
+        } else if (!sourceArgs.isEmpty()
                    && tuple.getSourceStreamId().equals(Constants.COORDINATED_STREAM_ID)) {
             return TupleType.COORD;
         } else {
@@ -279,19 +277,19 @@ public class CoordinatedBolt implements IRichBolt {
 
         @Override
         public String toString() {
-            return "reportCount: " + reportCount + "\n" +
-                   "expectedTupleCount: " + expectedTupleCount + "\n" +
-                   "receivedTuples: " + receivedTuples + "\n" +
-                   "failed: " + failed + "\n" +
-                   taskEmittedTuples.toString();
+            return "reportCount: " + reportCount + "\n"
+                    + "expectedTupleCount: " + expectedTupleCount + "\n"
+                    + "receivedTuples: " + receivedTuples + "\n"
+                    + "failed: " + failed + "\n"
+                    + taskEmittedTuples.toString();
         }
     }
 
     public static class IdStreamSpec implements Serializable {
-        GlobalStreamId _id;
+        GlobalStreamId id;
 
         protected IdStreamSpec(String component, String stream) {
-            _id = new GlobalStreamId(component, stream);
+            id = new GlobalStreamId(component, stream);
         }
 
         public static IdStreamSpec makeDetectSpec(String component, String stream) {
@@ -299,20 +297,20 @@ public class CoordinatedBolt implements IRichBolt {
         }
 
         public GlobalStreamId getGlobalStreamId() {
-            return _id;
+            return id;
         }
     }
 
     public class CoordinatedOutputCollector implements IOutputCollector {
-        IOutputCollector _delegate;
+        IOutputCollector delegate;
 
         public CoordinatedOutputCollector(IOutputCollector delegate) {
-            _delegate = delegate;
+            this.delegate = delegate;
         }
 
         @Override
         public List<Integer> emit(String stream, Collection<Tuple> anchors, List<Object> tuple) {
-            List<Integer> tasks = _delegate.emit(stream, anchors, tuple);
+            List<Integer> tasks = delegate.emit(stream, anchors, tuple);
             updateTaskCounts(tuple.get(0), tasks);
             return tasks;
         }
@@ -320,58 +318,58 @@ public class CoordinatedBolt implements IRichBolt {
         @Override
         public void emitDirect(int task, String stream, Collection<Tuple> anchors, List<Object> tuple) {
             updateTaskCounts(tuple.get(0), Arrays.asList(task));
-            _delegate.emitDirect(task, stream, anchors, tuple);
+            delegate.emitDirect(task, stream, anchors, tuple);
         }
 
         @Override
         public void ack(Tuple tuple) {
             Object id = tuple.getValue(0);
-            synchronized (_tracked) {
-                TrackingInfo track = _tracked.get(id);
+            synchronized (tracked) {
+                TrackingInfo track = tracked.get(id);
                 if (track != null) {
                     track.receivedTuples++;
                 }
             }
             boolean failed = checkFinishId(tuple, TupleType.REGULAR);
             if (failed) {
-                _delegate.fail(tuple);
+                delegate.fail(tuple);
             } else {
-                _delegate.ack(tuple);
+                delegate.ack(tuple);
             }
         }
 
         @Override
         public void fail(Tuple tuple) {
             Object id = tuple.getValue(0);
-            synchronized (_tracked) {
-                TrackingInfo track = _tracked.get(id);
+            synchronized (tracked) {
+                TrackingInfo track = tracked.get(id);
                 if (track != null) {
                     track.failed = true;
                 }
             }
             checkFinishId(tuple, TupleType.REGULAR);
-            _delegate.fail(tuple);
+            delegate.fail(tuple);
         }
 
         @Override
         public void flush() {
-            _delegate.flush();
+            delegate.flush();
         }
 
         @Override
         public void resetTimeout(Tuple tuple) {
-            _delegate.resetTimeout(tuple);
+            delegate.resetTimeout(tuple);
         }
 
         @Override
         public void reportError(Throwable error) {
-            _delegate.reportError(error);
+            delegate.reportError(error);
         }
 
 
         private void updateTaskCounts(Object id, List<Integer> tasks) {
-            synchronized (_tracked) {
-                TrackingInfo track = _tracked.get(id);
+            synchronized (tracked) {
+                TrackingInfo track = tracked.get(id);
                 if (track != null) {
                     Map<Integer, Integer> taskEmittedTuples = track.taskEmittedTuples;
                     for (Integer task : tasks) {
@@ -386,12 +384,12 @@ public class CoordinatedBolt implements IRichBolt {
     private class TimeoutItems implements TimeCacheMap.ExpiredCallback<Object, TrackingInfo> {
         @Override
         public void expire(Object id, TrackingInfo val) {
-            synchronized (_tracked) {
+            synchronized (tracked) {
                 // the combination of the lock and the finished flag ensure that
                 // an id is never timed out if it has been finished
                 val.failed = true;
                 if (!val.finished) {
-                    ((TimeoutCallback) _delegate).timeoutId(id);
+                    ((TimeoutCallback) delegate).timeoutId(id);
                 }
             }
         }
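
The checkFinishId condition reformatted above is the heart of Storm's count-based coordination: a batch id finishes only after every source has reported on the COORD stream and the reported tuple count matches what was actually received. A minimal, Storm-free sketch of that bookkeeping (two hypothetical sources):

    public class CoordinationDemo {
        static final int NUM_SOURCE_REPORTS = 2;
        static int reports;  // COORD reports seen so far
        static int expected; // sum of counts carried by those reports
        static int received; // regular tuples actually processed

        static boolean finished() {
            return reports == NUM_SOURCE_REPORTS && expected == received;
        }

        public static void main(String[] args) {
            received += 3;            // process three regular tuples
            reports++; expected += 3; // source A reports it sent three
            System.out.println(finished()); // false: still waiting on source B
            reports++;                // source B reports it sent zero
            System.out.println(finished()); // true: the batch id can finish
        }
    }
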
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/GrouperFactory.java b/storm-client/src/jvm/org/apache/storm/daemon/GrouperFactory.java
index 27a54e8..0f83597 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/GrouperFactory.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/GrouperFactory.java
@@ -121,7 +121,7 @@ public class GrouperFactory {
     }
 
     /**
-     * A bridge between CustomStreamGrouping and LoadAwareCustomStreamGrouping
+     * A bridge between CustomStreamGrouping and LoadAwareCustomStreamGrouping.
      */
     public static class BasicLoadAwareCustomStreamGrouping implements LoadAwareCustomStreamGrouping {
 
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/StormCommon.java b/storm-client/src/jvm/org/apache/storm/daemon/StormCommon.java
index 2bb1871..6a5cee8 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/StormCommon.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/StormCommon.java
@@ -252,9 +252,6 @@ public class StormCommon {
 
     @SuppressWarnings("unchecked")
     public static void addAcker(Map<String, Object> conf, StormTopology topology) {
-        int ackerNum =
-            ObjectReader.getInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), ObjectReader.getInt(conf.get(Config.TOPOLOGY_WORKERS)));
-        Map<GlobalStreamId, Grouping> inputs = ackerInputs(topology);
 
         Map<String, StreamInfo> outputStreams = new HashMap<String, StreamInfo>();
         outputStreams.put(Acker.ACKER_ACK_STREAM_ID, Thrift.directOutputFields(Arrays.asList("id", "time-delta-ms")));
@@ -262,9 +259,12 @@ public class StormCommon {
         outputStreams.put(Acker.ACKER_RESET_TIMEOUT_STREAM_ID, Thrift.directOutputFields(Arrays.asList("id", "time-delta-ms")));
 
         Map<String, Object> ackerConf = new HashMap<>();
+        int ackerNum =
+                ObjectReader.getInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), ObjectReader.getInt(conf.get(Config.TOPOLOGY_WORKERS)));
         ackerConf.put(Config.TOPOLOGY_TASKS, ackerNum);
         ackerConf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, ObjectReader.getInt(conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)));
 
+        Map<GlobalStreamId, Grouping> inputs = ackerInputs(topology);
         Bolt acker = Thrift.prepareSerializedBoltDetails(inputs, makeAckerBolt(), outputStreams, ackerNum, ackerConf);
 
         for (Bolt bolt : topology.get_bolts().values()) {
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/AdvancedFSOps.java b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/AdvancedFSOps.java
index 79b5af6..814ec12 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/AdvancedFSOps.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/AdvancedFSOps.java
@@ -43,16 +43,17 @@ import org.apache.storm.utils.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class AdvancedFSOps implements IAdvancedFSOps {
     private static final Logger LOG = LoggerFactory.getLogger(AdvancedFSOps.class);
-    protected final boolean _symlinksDisabled;
+    protected final boolean symlinksDisabled;
 
     protected AdvancedFSOps(Map<String, Object> conf) {
-        _symlinksDisabled = (boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false);
+        symlinksDisabled = (boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false);
     }
 
     /**
-     * Factory to create a new AdvancedFSOps
+     * Factory to create a new AdvancedFSOps.
      *
      * @param conf the configuration of the process
      * @return the appropriate instance of the class for this config and environment.
@@ -68,7 +69,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Set directory permissions to (OWNER)RWX (GROUP)R-X (OTHER)--- On some systems that do not support this, it may become a noop
+     * Set directory permissions to (OWNER)RWX (GROUP)R-X (OTHER)---. On some systems that do not support this, it may become a noop.
      *
      * @param dir the directory to change permissions on
      * @throws IOException on any error
@@ -83,7 +84,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Move fromDir to toDir, and try to make it an atomic move if possible
+     * Move fromDir to toDir, and try to make it an atomic move if possible.
      *
      * @param fromDir what to move
      * @param toDir   where to move it to
@@ -96,7 +97,8 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * @return true if an atomic directory move works, else false.
+     * Check whether an atomic directory move is supported.
+     * @return true if an atomic directory move works, else false
      */
     @Override
     public boolean supportsAtomicDirectoryMove() {
@@ -104,7 +106,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Copy a directory
+     * Copy a directory.
      *
      * @param fromDir from where
      * @param toDir   to where
@@ -116,7 +118,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Setup permissions properly for an internal blob store path
+     * Setup permissions properly for an internal blob store path.
      *
      * @param path the path to set the permissions on
      * @param user the user to change the permissions for
@@ -155,12 +157,13 @@ public class AdvancedFSOps implements IAdvancedFSOps {
             try {
                 FileUtils.forceDelete(path);
             } catch (FileNotFoundException ignored) {
+                // ignored: the file is already gone
             }
         }
     }
 
     /**
-     * Setup the permissions for the storm code dir
+     * Setup the permissions for the storm code dir.
      *
      * @param user the user that owns the topology
      * @param path the directory to set the permissions on
@@ -172,7 +175,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Setup the permissions for the worker artifacts dirs
+     * Setup the permissions for the worker artifacts dirs.
      *
      * @param user the user that owns the topology
      * @param path the directory to set the permissions on
@@ -230,7 +233,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Check if a file exists or not
+     * Check if a file exists or not.
      *
      * @param path the path to check
      * @return true if it exists else false
@@ -243,7 +246,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Check if a file exists or not
+     * Check if a file exists or not.
      *
      * @param path the path to check
      * @return true if it exists else false
@@ -256,7 +259,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Get a writer for the given location
+     * Get a writer for the given location.
      *
      * @param file the file to write to
      * @return the Writer to use.
@@ -269,7 +272,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Get an output stream to write to a given file
+     * Get an output stream to write to a given file.
      *
      * @param file the file to write to
      * @return an OutputStream for that file
@@ -282,7 +285,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Dump a string to a file
+     * Dump a string to a file.
      *
      * @param location where to write to
      * @param data     the data to write
@@ -300,7 +303,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Read the contents of a file into a String
+     * Read the contents of a file into a String.
      *
      * @param location the file to read
      * @return the contents of the file
@@ -326,7 +329,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
     }
 
     /**
-     * Create a symbolic link pointing at target
+     * Create a symbolic link pointing at target.
      *
      * @param link   the link to create
      * @param target where it should point to
@@ -334,7 +337,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
      */
     @Override
     public void createSymlink(File link, File target) throws IOException {
-        if (_symlinksDisabled) {
+        if (symlinksDisabled) {
             throw new IOException("Symlinks have been disabled, this should not be called");
         }
         Path plink = link.toPath().toAbsolutePath();
@@ -350,21 +353,22 @@ public class AdvancedFSOps implements IAdvancedFSOps {
         Files.createSymbolicLink(plink, ptarget);
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private static class AdvancedRunAsUserFSOps extends AdvancedFSOps {
-        private final Map<String, Object> _conf;
+        private final Map<String, Object> conf;
 
         public AdvancedRunAsUserFSOps(Map<String, Object> conf) {
             super(conf);
             if (Utils.isOnWindows()) {
                 throw new UnsupportedOperationException("ERROR: Windows doesn't support running workers as different users yet");
             }
-            _conf = conf;
+            this.conf = conf;
         }
 
         @Override
         public void setupBlobPermissions(File path, String user) throws IOException {
             String logPrefix = "setup blob permissions for " + path;
-            ClientSupervisorUtils.processLauncherAndWait(_conf, user, Arrays.asList("blob", path.toString()), null, logPrefix);
+            ClientSupervisorUtils.processLauncherAndWait(conf, user, Arrays.asList("blob", path.toString()), null, logPrefix);
         }
 
         @Override
@@ -378,7 +382,7 @@ public class AdvancedFSOps implements IAdvancedFSOps {
                 List<String> commands = new ArrayList<>();
                 commands.add("rmr");
                 commands.add(absolutePath);
-                ClientSupervisorUtils.processLauncherAndWait(_conf, user, commands, null, logPrefix);
+                ClientSupervisorUtils.processLauncherAndWait(conf, user, commands, null, logPrefix);
 
                 if (Utils.checkFileExists(absolutePath)) {
                     // It's possible that permissions were not set properly on the directory, and
@@ -399,18 +403,19 @@ public class AdvancedFSOps implements IAdvancedFSOps {
 
         @Override
         public void setupStormCodeDir(String user, File path) throws IOException {
-            ClientSupervisorUtils.setupStormCodeDir(_conf, user, path.getCanonicalPath());
+            ClientSupervisorUtils.setupStormCodeDir(conf, user, path.getCanonicalPath());
         }
 
         @Override
         public void setupWorkerArtifactsDir(String user, File path) throws IOException {
-            ClientSupervisorUtils.setupWorkerArtifactsDir(_conf, user, path.getCanonicalPath());
+            ClientSupervisorUtils.setupWorkerArtifactsDir(conf, user, path.getCanonicalPath());
         }
     }
 
     /**
-     * Operations that need to override the default ones when running on Windows
+     * Operations that need to override the default ones when running on Windows.
      */
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private static class AdvancedWindowsFSOps extends AdvancedFSOps {
 
         public AdvancedWindowsFSOps(Map<String, Object> conf) {
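
createSymlink above refuses to run when symlinks are disabled via Config.DISABLE_SYMLINKS. A minimal, self-contained sketch of the same guard-then-link flow in plain java.nio (paths are hypothetical):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class SymlinkGuardDemo {
        public static void main(String[] args) throws IOException {
            boolean symlinksDisabled = false; // stand-in for conf.get(Config.DISABLE_SYMLINKS)
            Path link = Paths.get("/tmp/storm-link");     // hypothetical link location
            Path target = Paths.get("/tmp/storm-target"); // hypothetical target
            if (symlinksDisabled) {
                throw new IOException("Symlinks have been disabled, this should not be called");
            }
            Files.deleteIfExists(link); // replace any stale link
            Files.createSymbolicLink(link, target);
            System.out.println(Files.readSymbolicLink(link));
        }
    }
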
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ClientSupervisorUtils.java b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ClientSupervisorUtils.java
index 0aa5734..6bf6752 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ClientSupervisorUtils.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ClientSupervisorUtils.java
@@ -42,7 +42,6 @@ public class ClientSupervisorUtils {
 
     static boolean doRequiredTopoFilesExist(Map<String, Object> conf, String stormId) throws IOException {
         String stormroot = ConfigUtils.supervisorStormDistRoot(conf, stormId);
-        String stormjarpath = ConfigUtils.supervisorStormJarPath(stormroot);
         String stormcodepath = ConfigUtils.supervisorStormCodePath(stormroot);
         String stormconfpath = ConfigUtils.supervisorStormConfPath(stormroot);
         if (!Utils.checkFileExists(stormroot)) {
@@ -54,6 +53,7 @@ public class ClientSupervisorUtils {
         if (!Utils.checkFileExists(stormconfpath)) {
             return false;
         }
+        String stormjarpath = ConfigUtils.supervisorStormJarPath(stormroot);
         if (ConfigUtils.isLocalMode(conf) || Utils.checkFileExists(stormjarpath)) {
             return true;
         }
@@ -111,9 +111,6 @@ public class ClientSupervisorUtils {
      * @param exitCodeCallback code to be called passing the exit code value when the process completes
      * @param dir              the working directory of the new process
      * @return the new process
-     *
-     * @throws IOException
-     * @see ProcessBuilder
      */
     public static Process launchProcess(List<String> command,
                                         Map<String, String> environment,
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ExitCodeCallback.java b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ExitCodeCallback.java
index 9c18ce1..de42895 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ExitCodeCallback.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ExitCodeCallback.java
@@ -18,7 +18,7 @@ package org.apache.storm.daemon.supervisor;
 public interface ExitCodeCallback {
 
     /**
-     * The process finished
+     * The process finished.
      *
      * @param exitCode the exit code of the finished process.
      */
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/IAdvancedFSOps.java b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/IAdvancedFSOps.java
index cb5dfc6..f55ba7f 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/IAdvancedFSOps.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/IAdvancedFSOps.java
@@ -18,7 +18,6 @@
 
 package org.apache.storm.daemon.supervisor;
 
-
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
@@ -27,10 +26,11 @@ import java.nio.file.DirectoryStream;
 import java.nio.file.Path;
 import java.util.Map;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public interface IAdvancedFSOps {
 
     /**
-     * Set directory permissions to (OWNER)RWX (GROUP)R-X (OTHER)--- On some systems that do not support this, it may become a noop
+     * Set directory permissions to (OWNER)RWX (GROUP)R-X (OTHER)---. On some systems that do not support this, it may become a noop.
      *
      * @param dir the directory to change permissions on
      * @throws IOException on any error
@@ -38,7 +38,7 @@ public interface IAdvancedFSOps {
     void restrictDirectoryPermissions(File dir) throws IOException;
 
     /**
-     * Move fromDir to toDir, and try to make it an atomic move if possible
+     * Move fromDir to toDir, and try to make it an atomic move if possible.
      *
      * @param fromDir what to move
      * @param toDir   where to move it to
@@ -47,12 +47,13 @@ public interface IAdvancedFSOps {
     void moveDirectoryPreferAtomic(File fromDir, File toDir) throws IOException;
 
     /**
-     * @return true if an atomic directory move works, else false.
+     * Check whether an atomic directory move is supported.
+     * @return true if an atomic directory move works, else false
      */
     boolean supportsAtomicDirectoryMove();
 
     /**
-     * Copy a directory
+     * Copy a directory.
      *
      * @param fromDir from where
      * @param toDir   to where
@@ -61,7 +62,7 @@ public interface IAdvancedFSOps {
     void copyDirectory(File fromDir, File toDir) throws IOException;
 
     /**
-     * Setup permissions properly for an internal blob store path
+     * Setup permissions properly for an internal blob store path.
      *
      * @param path the path to set the permissions on
      * @param user the user to change the permissions for
@@ -88,7 +89,7 @@ public interface IAdvancedFSOps {
     void deleteIfExists(File path) throws IOException;
 
     /**
-     * Setup the permissions for the storm code dir
+     * Setup the permissions for the storm code dir.
      *
      * @param user the owner of the topology
      * @param path the directory to set the permissions on
@@ -97,7 +98,7 @@ public interface IAdvancedFSOps {
     void setupStormCodeDir(String user, File path) throws IOException;
 
     /**
-     * Setup the permissions for the worker artifacts dirs
+     * Setup the permissions for the worker artifacts dirs.
      *
      * @param user the owner of the topology
      * @param path the directory to set the permissions on
@@ -154,7 +155,7 @@ public interface IAdvancedFSOps {
     DirectoryStream<Path> newDirectoryStream(Path dir) throws IOException;
 
     /**
-     * Check if a file exists or not
+     * Check if a file exists or not.
      *
      * @param path the path to check
      * @return true if it exists else false
@@ -164,7 +165,7 @@ public interface IAdvancedFSOps {
     boolean fileExists(File path) throws IOException;
 
     /**
-     * Check if a file exists or not
+     * Check if a file exists or not.
      *
      * @param path the path to check
      * @return true if it exists else false
@@ -174,7 +175,7 @@ public interface IAdvancedFSOps {
     boolean fileExists(Path path) throws IOException;
 
     /**
-     * Get a writer for the given location
+     * Get a writer for the given location.
      *
      * @param file the file to write to
      * @return the Writer to use.
@@ -184,7 +185,7 @@ public interface IAdvancedFSOps {
     Writer getWriter(File file) throws IOException;
 
     /**
-     * Get an output stream to write to a given file
+     * Get an output stream to write to a given file.
      *
      * @param file the file to write to
      * @return an OutputStream for that file
@@ -194,7 +195,7 @@ public interface IAdvancedFSOps {
     OutputStream getOutputStream(File file) throws IOException;
 
     /**
-     * Dump a string to a file
+     * Dump a string to a file.
      *
      * @param location where to write to
      * @param data     the data to write
@@ -203,7 +204,7 @@ public interface IAdvancedFSOps {
     void dump(File location, String data) throws IOException;
 
     /**
-     * Read the contents of a file into a String
+     * Read the contents of a file into a String.
      *
      * @param location the file to read
      * @return the contents of the file
@@ -223,7 +224,7 @@ public interface IAdvancedFSOps {
     byte[] slurp(File location) throws IOException;
 
     /**
-     * Create a symbolic link pointing at target
+     * Create a symbolic link pointing at target.
      *
      * @param link   the link to create
      * @param target where it should point to
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/worker/BackPressureTracker.java b/storm-client/src/jvm/org/apache/storm/daemon/worker/BackPressureTracker.java
index 7e98658..5800a00 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/worker/BackPressureTracker.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/worker/BackPressureTracker.java
@@ -22,19 +22,19 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import org.apache.storm.messaging.netty.BackPressureStatus;
-import org.apache.storm.utils.JCQueue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.util.concurrent.atomic.AtomicBoolean;
-
 import java.util.stream.Collectors;
+
+import org.apache.storm.messaging.netty.BackPressureStatus;
 import org.apache.storm.shade.org.apache.commons.lang.builder.ToStringBuilder;
 import org.apache.storm.shade.org.apache.commons.lang.builder.ToStringStyle;
+import org.apache.storm.utils.JCQueue;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-/***
- *   Tracks the BackPressure status.
+/**
+ * Tracks the BackPressure status.
  */
 public class BackPressureTracker {
     static final Logger LOG = LoggerFactory.getLogger(BackPressureTracker.class);
@@ -53,9 +53,11 @@ public class BackPressureTracker {
         tasks.get(taskId).backpressure.set(false);
     }
 
-    /***
+    /**
      * Record BP for a task.
-     * This is called by transferLocalBatch() on NettyWorker thread
+     *
+     * <p>This is called by transferLocalBatch() on the NettyWorker thread.
+     *
      * @return true if an update was recorded, false if taskId is already under BP
      */
     public boolean recordBackPressure(Integer taskId) {
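
One plausible reading of the recordBackPressure contract documented above is that an update is reported only on the transition into backpressure; a minimal AtomicBoolean sketch of that semantic (the real per-task bookkeeping is omitted):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class BackPressureFlagDemo {
        public static void main(String[] args) {
            AtomicBoolean underBackPressure = new AtomicBoolean(false);
            // First record flips the flag: an update was recorded.
            System.out.println(!underBackPressure.getAndSet(true)); // true
            // Already under BP: no new update to record.
            System.out.println(!underBackPressure.getAndSet(true)); // false
        }
    }
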
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/worker/Worker.java b/storm-client/src/jvm/org/apache/storm/daemon/worker/Worker.java
index 175a91a..23e577e 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/worker/Worker.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/worker/Worker.java
@@ -261,17 +261,16 @@ public class Worker implements Shutdownable, DaemonCommon {
             });
 
         workerState.checkForUpdatedBlobsTimer.scheduleRecurring(0,
-                                                                (Integer) conf
-                                                                    .getOrDefault(Config.WORKER_BLOB_UPDATE_POLL_INTERVAL_SECS, 10),
-                                                                () -> {
-                                                                    try {
-                                                                        LOG.debug("Checking if blobs have updated");
-                                                                        updateBlobUpdates();
-                                                                    } catch (IOException e) {
-                                                                        // IOException from reading the version files to be ignored
-                                                                        LOG.error(e.getStackTrace().toString());
-                                                                    }
-                                                                }
+                (Integer) conf.getOrDefault(Config.WORKER_BLOB_UPDATE_POLL_INTERVAL_SECS, 10),
+            () -> {
+                try {
+                    LOG.debug("Checking if blobs have updated");
+                    updateBlobUpdates();
+                } catch (IOException e) {
+                    // IOException from reading the version files to be ignored
+                    LOG.error(e.getStackTrace().toString());
+                }
+            }
         );
 
         // The jitter allows the clients to get the data at different times, and avoids thundering herd
@@ -309,15 +308,15 @@ public class Worker implements Shutdownable, DaemonCommon {
         }
 
         workerState.flushTupleTimer.scheduleRecurringMs(flushIntervalMillis, flushIntervalMillis,
-                                                        () -> {
-                                                            // send flush tuple to all local executors
-                                                            for (int i = 0; i < executors.size(); i++) {
-                                                                IRunningExecutor exec = executors.get(i);
-                                                                if (exec.getExecutorId().get(0) != Constants.SYSTEM_TASK_ID) {
-                                                                    exec.publishFlushTuple();
-                                                                }
-                                                            }
-                                                        }
+            () -> {
+                // send flush tuple to all local executors
+                for (int i = 0; i < executors.size(); i++) {
+                    IRunningExecutor exec = executors.get(i);
+                    if (exec.getExecutorId().get(0) != Constants.SYSTEM_TASK_ID) {
+                        exec.publishFlushTuple();
+                    }
+                }
+            }
         );
         LOG.info("Flush tuple will be generated every {} millis", flushIntervalMillis);
     }
@@ -367,11 +366,11 @@ public class Worker implements Shutdownable, DaemonCommon {
                                                                                   .toMap(IRunningExecutor::getExecutorId,
                                                                                          IRunningExecutor::renderStats)));
         }
-        Map<String, Object> zkHB = ClientStatsUtil.mkZkWorkerHb(workerState.topologyId, stats, workerState.uptime.upTime());
+        Map<String, Object> zkHb = ClientStatsUtil.mkZkWorkerHb(workerState.topologyId, stats, workerState.uptime.upTime());
         try {
             workerState.stormClusterState
                 .workerHeartbeat(workerState.topologyId, workerState.assignmentId, (long) workerState.port,
-                                 ClientStatsUtil.thriftifyZkWorkerHb(zkHB));
+                                 ClientStatsUtil.thriftifyZkWorkerHb(zkHb));
         } catch (Exception ex) {
             LOG.error("Worker failed to write heartbeats to ZK or Pacemaker...will retry", ex);
         }
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/worker/WorkerState.java b/storm-client/src/jvm/org/apache/storm/daemon/worker/WorkerState.java
index 913261d..f380769 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/worker/WorkerState.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/worker/WorkerState.java
@@ -152,11 +152,19 @@ public class WorkerState {
     private final Collection<IAutoCredentials> autoCredentials;
     private final StormMetricRegistry metricRegistry;
 
-    public WorkerState(Map<String, Object> conf, IContext mqContext, String topologyId, String assignmentId,
-                       Supplier<SupervisorIfaceFactory> supervisorIfaceSupplier, int port, String workerId, Map<String, Object> topologyConf, IStateStorage stateStorage,
-                       IStormClusterState stormClusterState, Collection<IAutoCredentials> autoCredentials,
-                       StormMetricRegistry metricRegistry) throws IOException,
-        InvalidTopologyException {
+    public WorkerState(Map<String, Object> conf,
+            IContext mqContext,
+            String topologyId,
+            String assignmentId,
+            Supplier<SupervisorIfaceFactory> supervisorIfaceSupplier,
+            int port,
+            String workerId,
+            Map<String, Object> topologyConf,
+            IStateStorage stateStorage,
+            IStormClusterState stormClusterState,
+            Collection<IAutoCredentials> autoCredentials,
+            StormMetricRegistry metricRegistry) throws IOException,
+            InvalidTopologyException {
         this.metricRegistry = metricRegistry;
         this.autoCredentials = autoCredentials;
         this.conf = conf;
@@ -230,9 +238,9 @@ public class WorkerState {
         this.receiver = this.mqContext.bind(topologyId, port, cb, newConnectionResponse);
     }
 
-    private static double getQueueLoad(JCQueue q) {
-        JCQueue.QueueMetrics qMetrics = q.getMetrics();
-        return ((double) qMetrics.population()) / qMetrics.capacity();
+    private static double getQueueLoad(JCQueue queue) {
+        JCQueue.QueueMetrics queueMetrics = queue.getMetrics();
+        return ((double) queueMetrics.population()) / queueMetrics.capacity();
     }
 
     public static boolean isConnectionReady(IConnection connection) {
@@ -463,7 +471,6 @@ public class WorkerState {
 
     public void refreshLoad(List<IRunningExecutor> execs) {
         Set<Integer> remoteTasks = Sets.difference(new HashSet<>(outboundTasks), new HashSet<>(localTaskIds));
-        Long now = System.currentTimeMillis();
         Map<Integer, Double> localLoad = new HashMap<>();
         for (IRunningExecutor exec : execs) {
             double receiveLoad = getQueueLoad(exec.getReceiveQueue());
@@ -475,6 +482,7 @@ public class WorkerState {
         loadMapping.setLocal(localLoad);
         loadMapping.setRemote(remoteLoad);
 
+        Long now = System.currentTimeMillis();
         if (now > nextLoadUpdate.get()) {
             receiver.sendLoadMetrics(localLoad);
             nextLoadUpdate.set(now + LOAD_REFRESH_INTERVAL_MS);
@@ -498,14 +506,14 @@ public class WorkerState {
         int delaySecs = 0;
         int recurSecs = 1;
         refreshActiveTimer.schedule(delaySecs,
-                                    () -> {
-                                        if (areAllConnectionsReady()) {
-                                            LOG.info("All connections are ready for worker {}:{} with id {}", assignmentId, port, workerId);
-                                            isWorkerActive.countDown();
-                                        } else {
-                                            refreshActiveTimer.schedule(recurSecs, () -> activateWorkerWhenAllConnectionsReady(), false, 0);
-                                        }
-                                    }
+            () -> {
+                if (areAllConnectionsReady()) {
+                    LOG.info("All connections are ready for worker {}:{} with id {}", assignmentId, port, workerId);
+                    isWorkerActive.countDown();
+                } else {
+                    refreshActiveTimer.schedule(recurSecs, () -> activateWorkerWhenAllConnectionsReady(), false, 0);
+                }
+            }
         );
     }
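
The reindented lambda above is a self-rescheduling check: the task re-arms itself every second until all connections are ready, then trips a latch. A minimal sketch of the same idea on a plain ScheduledExecutorService (StormTimer's API is Storm-specific; nothing from it is assumed here):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class RecheckUntilReady {
        static final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
        static final CountDownLatch ready = new CountDownLatch(1);
        static int attempts = 0;

        static boolean allConnectionsReady() {
            return ++attempts >= 3; // stand-in: pretend readiness on the third check
        }

        static void activateWhenReady(long delaySecs) {
            timer.schedule(() -> {
                if (allConnectionsReady()) {
                    ready.countDown();    // unblock whoever awaits readiness
                } else {
                    activateWhenReady(1); // re-arm: check again in one second
                }
            }, delaySecs, TimeUnit.SECONDS);
        }

        public static void main(String[] args) throws InterruptedException {
            activateWhenReady(0);
            ready.await();
            timer.shutdown();
        }
    }
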
 
@@ -649,7 +657,7 @@ public class WorkerState {
         try (SupervisorIfaceFactory fac = supervisorIfaceSupplier.get()) {
             return fac.getIface().getLocalAssignmentForStorm(topologyId);
         } catch (Throwable e) {
-                //if any error/exception thrown, fetch it from zookeeper
+            //if any error/exception thrown, fetch it from zookeeper
             Assignment assignment = stormClusterState.remoteAssignmentInfo(topologyId, null);
             if (assignment == null) {
                 throw new RuntimeException("Failed to read worker assignment."
@@ -666,8 +674,8 @@ public class WorkerState {
 
         if (recvBatchSize > recvQueueSize / 2) {
             throw new IllegalArgumentException(Config.TOPOLOGY_PRODUCER_BATCH_SIZE + ":" + recvBatchSize
-                                               + " is greater than half of " + Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE + ":" +
-                                               recvQueueSize);
+                    + " is greater than half of " + Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE + ":"
+                    + recvQueueSize);
         }
 
         IWaitStrategy backPressureWaitStrategy = IWaitStrategy.createBackPressureWaitStrategy(topologyConf);
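
The rewrapped exception message above guards a configuration invariant: the producer batch size must not exceed half the executor receive queue size. A self-contained sketch of the same check (key names abbreviated; the real ones are the Config constants in the hunk):

    public class BatchSizeCheck {
        static void validate(int recvQueueSize, int recvBatchSize) {
            if (recvBatchSize > recvQueueSize / 2) {
                throw new IllegalArgumentException(
                    "producer batch size " + recvBatchSize
                    + " is greater than half of receive buffer size " + recvQueueSize);
            }
        }

        public static void main(String[] args) {
            validate(1024, 512); // ok: exactly half
            try {
                validate(1024, 513);
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage()); // rejected
            }
        }
    }
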
@@ -704,6 +712,7 @@ public class WorkerState {
     }
 
     /**
+     * Get worker outbound tasks.
      * @return seq of task ids that receive messages from this worker
      */
     private Set<Integer> workerOutboundTasks() {
diff --git a/storm-client/src/jvm/org/apache/storm/dependency/DependencyBlobStoreUtils.java b/storm-client/src/jvm/org/apache/storm/dependency/DependencyBlobStoreUtils.java
index 738af9b..a83e6ca 100644
--- a/storm-client/src/jvm/org/apache/storm/dependency/DependencyBlobStoreUtils.java
+++ b/storm-client/src/jvm/org/apache/storm/dependency/DependencyBlobStoreUtils.java
@@ -30,6 +30,7 @@ public class DependencyBlobStoreUtils {
         return BLOB_DEPENDENCIES_PREFIX + key;
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public static String applyUUIDToFileName(String fileName) {
         String fileNameWithExt = Files.getNameWithoutExtension(fileName);
         String ext = Files.getFileExtension(fileName);
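
For context on applyUUIDToFileName: the Guava Files helpers split the name into base and extension, and, judging by the method name, a UUID is then worked into the result. A hedged, JDK-only sketch of that kind of transformation (the exact output format in Storm may differ):

    import java.util.UUID;

    public class UuidFileName {
        // "dep.jar" -> "dep-<uuid>.jar"-style name; the format here is illustrative only.
        static String applyUuid(String fileName) {
            int dot = fileName.lastIndexOf('.');
            String base = dot < 0 ? fileName : fileName.substring(0, dot);
            String ext = dot < 0 ? "" : fileName.substring(dot); // includes the '.'
            return base + "-" + UUID.randomUUID() + ext;
        }

        public static void main(String[] args) {
            System.out.println(applyUuid("dependency.jar"));
        }
    }
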
diff --git a/storm-client/src/jvm/org/apache/storm/dependency/DependencyUploader.java b/storm-client/src/jvm/org/apache/storm/dependency/DependencyUploader.java
index 1d74d88..1f6b4f4 100644
--- a/storm-client/src/jvm/org/apache/storm/dependency/DependencyUploader.java
+++ b/storm-client/src/jvm/org/apache/storm/dependency/DependencyUploader.java
@@ -37,11 +37,11 @@ import org.apache.storm.generated.KeyAlreadyExistsException;
 import org.apache.storm.generated.KeyNotFoundException;
 import org.apache.storm.generated.SettableBlobMeta;
 import org.apache.storm.shade.com.google.common.annotations.VisibleForTesting;
+import org.apache.storm.shade.org.apache.commons.io.IOUtils;
 import org.apache.storm.utils.ObjectReader;
 import org.apache.storm.utils.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.storm.shade.org.apache.commons.io.IOUtils;
 
 public class DependencyUploader {
     public static final Logger LOG = LoggerFactory.getLogger(DependencyUploader.class);
@@ -163,7 +163,7 @@ public class DependencyUploader {
             AtomicOutputStream blob = null;
             try {
                 blob = getBlobStore().createBlob(key, new SettableBlobMeta(acls));
-                try(InputStream in = Files.newInputStream(dependency.toPath())) {
+                try (InputStream in = Files.newInputStream(dependency.toPath())) {
                     IOUtils.copy(in, blob, this.uploadChuckSize);
                 }
                 blob.close();
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/DRPCInvocationsClient.java b/storm-client/src/jvm/org/apache/storm/drpc/DRPCInvocationsClient.java
index e388de7..c4ca44a 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/DRPCInvocationsClient.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/DRPCInvocationsClient.java
@@ -25,6 +25,7 @@ import org.apache.storm.thrift.transport.TTransportException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class DRPCInvocationsClient extends ThriftClient implements DistributedRPCInvocations.Iface {
     public static final Logger LOG = LoggerFactory.getLogger(DRPCInvocationsClient.class);
     private final AtomicReference<DistributedRPCInvocations.Client> client = new AtomicReference<>();
@@ -35,7 +36,7 @@ public class DRPCInvocationsClient extends ThriftClient implements DistributedRP
         super(conf, ThriftConnectionType.DRPC_INVOCATIONS, host, port, null);
         this.host = host;
         this.port = port;
-        client.set(new DistributedRPCInvocations.Client(_protocol));
+        client.set(new DistributedRPCInvocations.Client(protocol));
     }
 
     public String getHost() {
@@ -49,7 +50,7 @@ public class DRPCInvocationsClient extends ThriftClient implements DistributedRP
     public void reconnectClient() throws TException {
         if (client.get() == null) {
             reconnect();
-            client.set(new DistributedRPCInvocations.Client(_protocol));
+            client.set(new DistributedRPCInvocations.Client(protocol));
         }
     }
 
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/DRPCSpout.java b/storm-client/src/jvm/org/apache/storm/drpc/DRPCSpout.java
index f8b6bc7..8756fca 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/DRPCSpout.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/DRPCSpout.java
@@ -49,38 +49,39 @@ import org.apache.storm.utils.ServiceRegistry;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class DRPCSpout extends BaseRichSpout {
     public static final Logger LOG = LoggerFactory.getLogger(DRPCSpout.class);
     //ANY CHANGE TO THIS CODE MUST BE SERIALIZABLE COMPATIBLE OR THERE WILL BE PROBLEMS
-    static final long serialVersionUID = 2387848310969237877L;
-    final String _function;
-    final String _local_drpc_id;
-    SpoutOutputCollector _collector;
-    List<DRPCInvocationsClient> _clients = new ArrayList<>();
-    transient LinkedList<Future<Void>> _futures = null;
-    transient ExecutorService _backround = null;
+    private static final long serialVersionUID = 2387848310969237877L;
+    private final String function;
+    private final String localDrpcId;
+    private SpoutOutputCollector collector;
+    private List<DRPCInvocationsClient> clients = new ArrayList<>();
+    private transient LinkedList<Future<Void>> futures = null;
+    private transient ExecutorService background = null;
 
     public DRPCSpout(String function) {
-        _function = function;
+        this.function = function;
         if (DRPCClient.isLocalOverride()) {
-            _local_drpc_id = DRPCClient.getOverrideServiceId();
+            localDrpcId = DRPCClient.getOverrideServiceId();
         } else {
-            _local_drpc_id = null;
+            localDrpcId = null;
         }
     }
 
 
     public DRPCSpout(String function, ILocalDRPC drpc) {
-        _function = function;
-        _local_drpc_id = drpc.getServiceId();
+        this.function = function;
+        localDrpcId = drpc.getServiceId();
     }
 
     public String get_function() {
-        return _function;
+        return function;
     }
 
     private void reconnectAsync(final DRPCInvocationsClient client) {
-        _futures.add(_backround.submit(new Callable<Void>() {
+        futures.add(background.submit(new Callable<Void>() {
             @Override
             public Void call() throws Exception {
                 client.reconnectClient();
@@ -99,7 +100,7 @@ public class DRPCSpout extends BaseRichSpout {
     }
 
     private void checkFutures() {
-        Iterator<Future<Void>> i = _futures.iterator();
+        Iterator<Future<Void>> i = futures.iterator();
         while (i.hasNext()) {
             Future<Void> f = i.next();
             if (f.isDone()) {
@@ -115,12 +116,12 @@ public class DRPCSpout extends BaseRichSpout {
 
     @Override
     public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
-        _collector = collector;
-        if (_local_drpc_id == null) {
-            _backround = new ExtendedThreadPoolExecutor(0, Integer.MAX_VALUE,
+        this.collector = collector;
+        if (localDrpcId == null) {
+            background = new ExtendedThreadPoolExecutor(0, Integer.MAX_VALUE,
                                                         60L, TimeUnit.SECONDS,
                                                         new SynchronousQueue<Runnable>());
-            _futures = new LinkedList<>();
+            futures = new LinkedList<>();
 
             int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
             int index = context.getThisTaskIndex();
@@ -133,11 +134,11 @@ public class DRPCSpout extends BaseRichSpout {
 
             if (numTasks < servers.size()) {
                 for (String s : servers) {
-                    _futures.add(_backround.submit(new Adder(s, port, conf)));
+                    futures.add(background.submit(new Adder(s, port, conf)));
                 }
             } else {
                 int i = index % servers.size();
-                _futures.add(_backround.submit(new Adder(servers.get(i), port, conf)));
+                futures.add(background.submit(new Adder(servers.get(i), port, conf)));
             }
         }
 
@@ -145,22 +146,22 @@ public class DRPCSpout extends BaseRichSpout {
 
     @Override
     public void close() {
-        for (DRPCInvocationsClient client : _clients) {
+        for (DRPCInvocationsClient client : clients) {
             client.close();
         }
     }
 
     @Override
     public void nextTuple() {
-        if (_local_drpc_id == null) {
+        if (localDrpcId == null) {
             int size = 0;
-            synchronized (_clients) {
-                size = _clients.size(); //This will only ever grow, so no need to worry about falling off the end
+            synchronized (clients) {
+                size = clients.size(); //This will only ever grow, so no need to worry about falling off the end
             }
             for (int i = 0; i < size; i++) {
                 DRPCInvocationsClient client;
-                synchronized (_clients) {
-                    client = _clients.get(i);
+                synchronized (clients) {
+                    client = clients.get(i);
                 }
                 if (!client.isConnected()) {
                     LOG.warn("DRPCInvocationsClient [{}:{}] is not connected.", client.getHost(), client.getPort());
@@ -168,13 +169,13 @@ public class DRPCSpout extends BaseRichSpout {
                     continue;
                 }
                 try {
-                    DRPCRequest req = client.fetchRequest(_function);
+                    DRPCRequest req = client.fetchRequest(function);
                     if (req.get_request_id().length() > 0) {
                         Map<String, Object> returnInfo = new HashMap<>();
                         returnInfo.put("id", req.get_request_id());
                         returnInfo.put("host", client.getHost());
                         returnInfo.put("port", client.getPort());
-                        _collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)),
+                        collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)),
                                         new DRPCMessageId(req.get_request_id(), i));
                         break;
                     }
@@ -190,16 +191,16 @@ public class DRPCSpout extends BaseRichSpout {
             }
             checkFutures();
         } else {
-            DistributedRPCInvocations.Iface drpc = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
+            DistributedRPCInvocations.Iface drpc = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(localDrpcId);
             if (drpc != null) { // can happen during shutdown of drpc while topology is still up
                 try {
-                    DRPCRequest req = drpc.fetchRequest(_function);
+                    DRPCRequest req = drpc.fetchRequest(function);
                     if (req.get_request_id().length() > 0) {
                         Map<String, Object> returnInfo = new HashMap<>();
                         returnInfo.put("id", req.get_request_id());
-                        returnInfo.put("host", _local_drpc_id);
+                        returnInfo.put("host", localDrpcId);
                         returnInfo.put("port", 0);
-                        _collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)),
+                        collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)),
                                         new DRPCMessageId(req.get_request_id(), 0));
                     }
                 } catch (AuthorizationException aze) {
@@ -220,10 +221,10 @@ public class DRPCSpout extends BaseRichSpout {
         DRPCMessageId did = (DRPCMessageId) msgId;
         DistributedRPCInvocations.Iface client;
 
-        if (_local_drpc_id == null) {
-            client = _clients.get(did.index);
+        if (localDrpcId == null) {
+            client = clients.get(did.index);
         } else {
-            client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
+            client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(localDrpcId);
         }
 
         int retryCnt = 0;
@@ -252,6 +253,7 @@ public class DRPCSpout extends BaseRichSpout {
         declarer.declare(new Fields("args", "return-info"));
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private static class DRPCMessageId {
         String id;
         int index;
@@ -276,8 +278,8 @@ public class DRPCSpout extends BaseRichSpout {
         @Override
         public Void call() throws Exception {
             DRPCInvocationsClient c = new DRPCInvocationsClient(conf, server, port);
-            synchronized (_clients) {
-                _clients.add(c);
+            synchronized (clients) {
+                clients.add(c);
             }
             return null;
         }
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/JoinResult.java b/storm-client/src/jvm/org/apache/storm/drpc/JoinResult.java
index 543e22a..d4b7214 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/JoinResult.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/JoinResult.java
@@ -30,10 +30,10 @@ import org.slf4j.LoggerFactory;
 public class JoinResult extends BaseRichBolt {
     public static final Logger LOG = LoggerFactory.getLogger(JoinResult.class);
 
-    String returnComponent;
-    Map<Object, Tuple> returns = new HashMap<>();
-    Map<Object, Tuple> results = new HashMap<>();
-    OutputCollector _collector;
+    private String returnComponent;
+    private Map<Object, Tuple> returns = new HashMap<>();
+    private Map<Object, Tuple> results = new HashMap<>();
+    private OutputCollector collector;
 
     public JoinResult(String returnComponent) {
         this.returnComponent = returnComponent;
@@ -41,7 +41,7 @@ public class JoinResult extends BaseRichBolt {
 
     @Override
     public void prepare(Map<String, Object> map, TopologyContext context, OutputCollector collector) {
-        _collector = collector;
+        this.collector = collector;
     }
 
     @Override
@@ -60,9 +60,9 @@ public class JoinResult extends BaseRichBolt {
             List<Tuple> anchors = new ArrayList<>();
             anchors.add(result);
             anchors.add(returner);
-            _collector.emit(anchors, new Values("" + result.getValue(1), returner.getValue(1)));
-            _collector.ack(result);
-            _collector.ack(returner);
+            collector.emit(anchors, new Values("" + result.getValue(1), returner.getValue(1)));
+            collector.ack(result);
+            collector.ack(returner);
         }
     }
 
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/KeyedFairBolt.java b/storm-client/src/jvm/org/apache/storm/drpc/KeyedFairBolt.java
index fc10f86..7a358bf 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/KeyedFairBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/KeyedFairBolt.java
@@ -26,13 +26,13 @@ import org.apache.storm.utils.KeyedRoundRobinQueue;
 
 
 public class KeyedFairBolt implements IRichBolt, FinishedCallback {
-    IRichBolt _delegate;
-    KeyedRoundRobinQueue<Tuple> _rrQueue;
-    Thread _executor;
-    FinishedCallback _callback;
+    IRichBolt delegate;
+    KeyedRoundRobinQueue<Tuple> rrQueue;
+    Thread executor;
+    FinishedCallback callback;
 
     public KeyedFairBolt(IRichBolt delegate) {
-        _delegate = delegate;
+        this.delegate = delegate;
     }
 
     public KeyedFairBolt(IBasicBolt delegate) {
@@ -41,48 +41,48 @@ public class KeyedFairBolt implements IRichBolt, FinishedCallback {
 
     @Override
     public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
-        if (_delegate instanceof FinishedCallback) {
-            _callback = (FinishedCallback) _delegate;
+        if (delegate instanceof FinishedCallback) {
+            callback = (FinishedCallback) delegate;
         }
-        _delegate.prepare(topoConf, context, collector);
-        _rrQueue = new KeyedRoundRobinQueue<Tuple>();
-        _executor = new Thread(new Runnable() {
+        delegate.prepare(topoConf, context, collector);
+        rrQueue = new KeyedRoundRobinQueue<Tuple>();
+        executor = new Thread(new Runnable() {
             @Override
             public void run() {
                 try {
                     while (true) {
-                        _delegate.execute(_rrQueue.take());
+                        delegate.execute(rrQueue.take());
                     }
                 } catch (InterruptedException e) {
-
+                    //ignore
                 }
             }
         });
-        _executor.setDaemon(true);
-        _executor.start();
+        executor.setDaemon(true);
+        executor.start();
     }
 
     @Override
     public void execute(Tuple input) {
         Object key = input.getValue(0);
-        _rrQueue.add(key, input);
+        rrQueue.add(key, input);
     }
 
     @Override
     public void cleanup() {
-        _executor.interrupt();
-        _delegate.cleanup();
+        executor.interrupt();
+        delegate.cleanup();
     }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        _delegate.declareOutputFields(declarer);
+        delegate.declareOutputFields(declarer);
     }
 
     @Override
     public void finishedId(Object id) {
-        if (_callback != null) {
-            _callback.finishedId(id);
+        if (callback != null) {
+            callback.finishedId(id);
         }
     }
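
The renamed fields make KeyedFairBolt's shape easier to read: execute() only enqueues, and a daemon thread drains the queue into the delegate, with the interrupt in cleanup() acting as the shutdown signal. A simplified sketch of that producer/consumer split, with a plain BlockingQueue standing in for Storm's KeyedRoundRobinQueue (the per-key fairness is elided):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.function.Consumer;

    public class BackgroundDrain<T> {
        private final BlockingQueue<T> queue = new LinkedBlockingQueue<>();
        private final Thread executor;

        BackgroundDrain(Consumer<T> delegate) {
            executor = new Thread(() -> {
                try {
                    while (true) {
                        delegate.accept(queue.take()); // block until work arrives
                    }
                } catch (InterruptedException e) {
                    // ignore: interrupt is the shutdown signal, as in cleanup()
                }
            });
            executor.setDaemon(true);
            executor.start();
        }

        void submit(T item) {
            queue.add(item); // callers never block on the delegate
        }

        void shutdown() {
            executor.interrupt();
        }

        public static void main(String[] args) throws Exception {
            BackgroundDrain<String> d = new BackgroundDrain<>(System.out::println);
            d.submit("tuple-1");
            d.submit("tuple-2");
            Thread.sleep(100); // let the daemon thread drain before the JVM exits
            d.shutdown();
        }
    }
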
 
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCInputDeclarer.java b/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCInputDeclarer.java
index 5ef3549..d026ec2 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCInputDeclarer.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCInputDeclarer.java
@@ -16,6 +16,7 @@ import org.apache.storm.grouping.CustomStreamGrouping;
 import org.apache.storm.topology.ComponentConfigurationDeclarer;
 import org.apache.storm.tuple.Fields;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public interface LinearDRPCInputDeclarer extends ComponentConfigurationDeclarer<LinearDRPCInputDeclarer> {
     public LinearDRPCInputDeclarer fieldsGrouping(Fields fields);
 
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCTopologyBuilder.java b/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCTopologyBuilder.java
index 53f8eb9..6512976 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCTopologyBuilder.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCTopologyBuilder.java
@@ -41,7 +41,7 @@ import org.apache.storm.topology.OutputFieldsGetter;
 import org.apache.storm.topology.TopologyBuilder;
 import org.apache.storm.tuple.Fields;
 
-
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class LinearDRPCTopologyBuilder {
     String function;
     List<Component> components = new ArrayList<>();
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/ReturnResults.java b/storm-client/src/jvm/org/apache/storm/drpc/ReturnResults.java
index b120b51..6f40645 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/ReturnResults.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/ReturnResults.java
@@ -37,15 +37,15 @@ public class ReturnResults extends BaseRichBolt {
     public static final Logger LOG = LoggerFactory.getLogger(ReturnResults.class);
     //ANY CHANGE TO THIS CODE MUST BE SERIALIZABLE COMPATIBLE OR THERE WILL BE PROBLEMS
     static final long serialVersionUID = -774882142710631591L;
-    OutputCollector _collector;
+    OutputCollector collector;
     boolean local;
-    Map<String, Object> _conf;
-    Map<List, DRPCInvocationsClient> _clients = new HashMap<List, DRPCInvocationsClient>();
+    Map<String, Object> conf;
+    Map<List, DRPCInvocationsClient> clients = new HashMap<List, DRPCInvocationsClient>();
 
     @Override
     public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
-        _conf = topoConf;
-        _collector = collector;
+        conf = topoConf;
+        this.collector = collector;
         local = topoConf.get(Config.STORM_CLUSTER_MODE).equals("local");
     }
 
@@ -59,7 +59,7 @@ public class ReturnResults extends BaseRichBolt {
                 retMap = (Map<String, Object>) JSONValue.parseWithException(returnInfo);
             } catch (ParseException e) {
                 LOG.error("Parseing returnInfo failed", e);
-                _collector.fail(input);
+                collector.fail(input);
                 return;
             }
             final String host = (String) retMap.get("host");
@@ -69,19 +69,21 @@ public class ReturnResults extends BaseRichBolt {
             if (local) {
                 client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(host);
             } else {
-                List server = new ArrayList() {{
-                    add(host);
-                    add(port);
-                }};
+                List server = new ArrayList() {
+                    {
+                        add(host);
+                        add(port);
+                    }
+                };
 
-                if (!_clients.containsKey(server)) {
+                if (!clients.containsKey(server)) {
                     try {
-                        _clients.put(server, new DRPCInvocationsClient(_conf, host, port));
+                        clients.put(server, new DRPCInvocationsClient(conf, host, port));
                     } catch (TTransportException ex) {
                         throw new RuntimeException(ex);
                     }
                 }
-                client = _clients.get(server);
+                client = clients.get(server);
             }
 
 
@@ -91,16 +93,16 @@ public class ReturnResults extends BaseRichBolt {
                 retryCnt++;
                 try {
                     client.result(id, result);
-                    _collector.ack(input);
+                    collector.ack(input);
                     break;
                 } catch (AuthorizationException aze) {
                     LOG.error("Not authorized to return results to DRPC server", aze);
-                    _collector.fail(input);
+                    collector.fail(input);
                     throw new RuntimeException(aze);
                 } catch (TException tex) {
                     if (retryCnt >= maxRetries) {
                         LOG.error("Failed to return results to DRPC server", tex);
-                        _collector.fail(input);
+                        collector.fail(input);
                     }
                     reconnectClient((DRPCInvocationsClient) client);
                 }
@@ -121,7 +123,7 @@ public class ReturnResults extends BaseRichBolt {
 
     @Override
     public void cleanup() {
-        for (DRPCInvocationsClient c : _clients.values()) {
+        for (DRPCInvocationsClient c : clients.values()) {
             c.close();
         }
     }
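
The retry loop in execute() above follows a common bounded-retry shape: attempt delivery, reconnect on a transport error, ack on success, and fail the tuple once maxRetries is exhausted. A self-contained sketch of that control flow (the client and exception types below are stand-ins for the Thrift ones):

    public class BoundedRetry {
        static class TransportException extends Exception {}

        interface Client {
            void result(String id, String value) throws TransportException;
        }

        /** Returns true if delivered (ack path), false if all attempts failed (fail path). */
        static boolean deliver(Client client, String id, String value, int maxRetries) {
            int retryCnt = 0;
            while (retryCnt < maxRetries) {
                retryCnt++;
                try {
                    client.result(id, value);
                    return true;
                } catch (TransportException e) {
                    // a reconnect attempt would go here before the next try
                }
            }
            return false;
        }

        public static void main(String[] args) {
            Client alwaysDown = (id, v) -> { throw new TransportException(); };
            System.out.println(deliver(alwaysDown, "req-1", "42", 3)); // false after 3 attempts
        }
    }
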
diff --git a/storm-client/src/jvm/org/apache/storm/executor/Executor.java b/storm-client/src/jvm/org/apache/storm/executor/Executor.java
index c69596e..7e687ad 100644
--- a/storm-client/src/jvm/org/apache/storm/executor/Executor.java
+++ b/storm-client/src/jvm/org/apache/storm/executor/Executor.java
@@ -325,20 +325,20 @@ public abstract class Executor implements Callable, JCQueue.Consumer {
         for (final Integer interval : intervalToTaskToMetricToRegistry.keySet()) {
             StormTimer timerTask = workerData.getUserTimer();
             timerTask.scheduleRecurring(interval, interval,
-                                        () -> {
-                                            TupleImpl tuple =
-                                                new TupleImpl(workerTopologyContext, new Values(interval), Constants.SYSTEM_COMPONENT_ID,
-                                                              (int) Constants.SYSTEM_TASK_ID, Constants.METRICS_TICK_STREAM_ID);
-                                            AddressedTuple metricsTickTuple = new AddressedTuple(AddressedTuple.BROADCAST_DEST, tuple);
-                                            try {
-                                                receiveQueue.publish(metricsTickTuple);
-                                                receiveQueue.flush();  // avoid buffering
-                                            } catch (InterruptedException e) {
-                                                LOG.warn("Thread interrupted when publishing metrics. Setting interrupt flag.");
-                                                Thread.currentThread().interrupt();
-                                                return;
-                                            }
-                                        }
+                () -> {
+                    TupleImpl tuple =
+                        new TupleImpl(workerTopologyContext, new Values(interval), Constants.SYSTEM_COMPONENT_ID,
+                                      (int) Constants.SYSTEM_TASK_ID, Constants.METRICS_TICK_STREAM_ID);
+                    AddressedTuple metricsTickTuple = new AddressedTuple(AddressedTuple.BROADCAST_DEST, tuple);
+                    try {
+                        receiveQueue.publish(metricsTickTuple);
+                        receiveQueue.flush();  // avoid buffering
+                    } catch (InterruptedException e) {
+                        LOG.warn("Thread interrupted when publishing metrics. Setting interrupt flag.");
+                        Thread.currentThread().interrupt();
+                        return;
+                    }
+                }
             );
         }
     }
@@ -355,21 +355,21 @@ public abstract class Executor implements Callable, JCQueue.Consumer {
             } else {
                 StormTimer timerTask = workerData.getUserTimer();
                 timerTask.scheduleRecurring(tickTimeSecs, tickTimeSecs,
-                                            () -> {
-                                                TupleImpl tuple = new TupleImpl(workerTopologyContext, new Values(tickTimeSecs),
-                                                                                Constants.SYSTEM_COMPONENT_ID,
-                                                                                (int) Constants.SYSTEM_TASK_ID,
-                                                                                Constants.SYSTEM_TICK_STREAM_ID);
-                                                AddressedTuple tickTuple = new AddressedTuple(AddressedTuple.BROADCAST_DEST, tuple);
-                                                try {
-                                                    receiveQueue.publish(tickTuple);
-                                                    receiveQueue.flush(); // avoid buffering
-                                                } catch (InterruptedException e) {
-                                                    LOG.warn("Thread interrupted when emitting tick tuple. Setting interrupt flag.");
-                                                    Thread.currentThread().interrupt();
-                                                    return;
-                                                }
-                                            }
+                    () -> {
+                        TupleImpl tuple = new TupleImpl(workerTopologyContext, new Values(tickTimeSecs),
+                                                        Constants.SYSTEM_COMPONENT_ID,
+                                                        (int) Constants.SYSTEM_TASK_ID,
+                                                        Constants.SYSTEM_TICK_STREAM_ID);
+                        AddressedTuple tickTuple = new AddressedTuple(AddressedTuple.BROADCAST_DEST, tuple);
+                        try {
+                            receiveQueue.publish(tickTuple);
+                            receiveQueue.flush(); // avoid buffering
+                        } catch (InterruptedException e) {
+                            LOG.warn("Thread interrupted when emitting tick tuple. Setting interrupt flag.");
+                            Thread.currentThread().interrupt();
+                            return;
+                        }
+                    }
                 );
             }
         }
diff --git a/storm-client/src/jvm/org/apache/storm/executor/ExecutorTransfer.java b/storm-client/src/jvm/org/apache/storm/executor/ExecutorTransfer.java
index eee553e..4121a85 100644
--- a/storm-client/src/jvm/org/apache/storm/executor/ExecutorTransfer.java
+++ b/storm-client/src/jvm/org/apache/storm/executor/ExecutorTransfer.java
@@ -36,7 +36,7 @@ public class ExecutorTransfer {
     private int indexingBase = 0;
     private ArrayList<JCQueue> localReceiveQueues; // [taskId-indexingBase] => queue : List of all recvQs local to this worker
     private AtomicReferenceArray<JCQueue> queuesToFlush;
-        // [taskId-indexingBase] => queue, some entries can be null. : outbound Qs for this executor instance
+    // [taskId-indexingBase] => queue, some entries can be null. : outbound Qs for this executor instance
 
 
     public ExecutorTransfer(WorkerState workerData, Map<String, Object> topoConf) {
diff --git a/storm-client/src/jvm/org/apache/storm/executor/bolt/BoltExecutor.java b/storm-client/src/jvm/org/apache/storm/executor/bolt/BoltExecutor.java
index 2ac76ec..4fbf1b3 100644
--- a/storm-client/src/jvm/org/apache/storm/executor/bolt/BoltExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/executor/bolt/BoltExecutor.java
@@ -33,7 +33,7 @@ import org.apache.storm.hooks.info.BoltExecuteInfo;
 import org.apache.storm.messaging.IConnection;
 import org.apache.storm.metric.api.IMetricsRegistrant;
 import org.apache.storm.policy.IWaitStrategy;
-import org.apache.storm.policy.IWaitStrategy.WAIT_SITUATION;
+import org.apache.storm.policy.IWaitStrategy.WaitSituation;
 import org.apache.storm.policy.WaitStrategyPark;
 import org.apache.storm.security.auth.IAutoCredentials;
 import org.apache.storm.shade.com.google.common.collect.ImmutableMap;
@@ -75,10 +75,10 @@ public class BoltExecutor extends Executor {
             this.consumeWaitStrategy = makeSystemBoltWaitStrategy();
         } else {
             this.consumeWaitStrategy = ReflectionUtils.newInstance((String) topoConf.get(Config.TOPOLOGY_BOLT_WAIT_STRATEGY));
-            this.consumeWaitStrategy.prepare(topoConf, WAIT_SITUATION.BOLT_WAIT);
+            this.consumeWaitStrategy.prepare(topoConf, WaitSituation.BOLT_WAIT);
         }
         this.backPressureWaitStrategy = ReflectionUtils.newInstance((String) topoConf.get(Config.TOPOLOGY_BACKPRESSURE_WAIT_STRATEGY));
-        this.backPressureWaitStrategy.prepare(topoConf, WAIT_SITUATION.BACK_PRESSURE_WAIT);
+        this.backPressureWaitStrategy.prepare(topoConf, WaitSituation.BACK_PRESSURE_WAIT);
         this.stats = new BoltExecutorStats(ConfigUtils.samplingRate(this.getTopoConf()),
                                            ObjectReader.getInt(this.getTopoConf().get(Config.NUM_STAT_BUCKETS)));
         this.builtInMetrics = new BuiltinBoltMetrics(stats);
@@ -88,7 +88,7 @@ public class BoltExecutor extends Executor {
         WaitStrategyPark ws = new WaitStrategyPark();
         Map<String, Object> conf = new HashMap<>();
         conf.put(Config.TOPOLOGY_BOLT_WAIT_PARK_MICROSEC, 5000);
-        ws.prepare(conf, WAIT_SITUATION.BOLT_WAIT);
+        ws.prepare(conf, WaitSituation.BOLT_WAIT);
         return ws;
     }
 
@@ -239,8 +239,8 @@ public class BoltExecutor extends Executor {
                 LOG.info("Execute done TUPLE {} TASK: {} DELTA: {}", tuple, taskId, delta);
             }
             TopologyContext topologyContext = idToTask.get(taskId - idToTaskBase).getUserContext();
-            if (!topologyContext.getHooks().isEmpty()) // perf critical check to avoid unnecessary allocation
-            {
+            if (!topologyContext.getHooks().isEmpty()) {
+                // perf critical check to avoid unnecessary allocation
                 new BoltExecuteInfo(tuple, taskId, delta).applyOn(topologyContext);
             }
             if (delta >= 0) {
diff --git a/storm-client/src/jvm/org/apache/storm/executor/bolt/BoltOutputCollectorImpl.java b/storm-client/src/jvm/org/apache/storm/executor/bolt/BoltOutputCollectorImpl.java
index 5ea7838..da9af50 100644
--- a/storm-client/src/jvm/org/apache/storm/executor/bolt/BoltOutputCollectorImpl.java
+++ b/storm-client/src/jvm/org/apache/storm/executor/bolt/BoltOutputCollectorImpl.java
@@ -98,8 +98,8 @@ public class BoltOutputCollectorImpl implements IOutputCollector {
                     if (rootIds.size() > 0) {
                         long edgeId = MessageId.generateId(random);
                         ((TupleImpl) a).updateAckVal(edgeId);
-                        for (Long root_id : rootIds) {
-                            putXor(anchorsToIds, root_id, edgeId);
+                        for (Long rootId : rootIds) {
+                            putXor(anchorsToIds, rootId, edgeId);
                         }
                     }
                 }
@@ -202,7 +202,7 @@ public class BoltOutputCollectorImpl implements IOutputCollector {
     private void putXor(Map<Long, Long> pending, Long key, Long id) {
         Long curr = pending.get(key);
         if (curr == null) {
-            curr = 0l;
+            curr = 0L;
         }
         pending.put(key, Utils.bitXor(curr, id));
     }
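
putXor() above is the core of Storm's acking arithmetic: every edge id is XORed into a per-root accumulator, and because x ^ x == 0, the accumulator returns to zero exactly when every emitted edge has also been acked. A small worked example of that invariant (Utils.bitXor is Storm's helper; plain ^ shows the same math):

    import java.util.HashMap;
    import java.util.Map;

    public class XorAckExample {
        static void putXor(Map<Long, Long> pending, Long key, Long id) {
            Long curr = pending.get(key);
            if (curr == null) {
                curr = 0L;
            }
            pending.put(key, curr ^ id); // xor-in: the same id twice cancels out
        }

        public static void main(String[] args) {
            Map<Long, Long> pending = new HashMap<>();
            long rootId = 7L;
            long edge1 = 0xCAFEL;
            long edge2 = 0xBEEFL;
            putXor(pending, rootId, edge1); // emit edge1
            putXor(pending, rootId, edge2); // emit edge2
            putXor(pending, rootId, edge1); // ack edge1
            putXor(pending, rootId, edge2); // ack edge2
            System.out.println(pending.get(rootId)); // 0 -> tuple tree complete
        }
    }
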
diff --git a/storm-client/src/jvm/org/apache/storm/executor/spout/SpoutExecutor.java b/storm-client/src/jvm/org/apache/storm/executor/spout/SpoutExecutor.java
index 0c45d56..c10ab2e 100644
--- a/storm-client/src/jvm/org/apache/storm/executor/spout/SpoutExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/executor/spout/SpoutExecutor.java
@@ -33,7 +33,7 @@ import org.apache.storm.executor.TupleInfo;
 import org.apache.storm.hooks.info.SpoutAckInfo;
 import org.apache.storm.hooks.info.SpoutFailInfo;
 import org.apache.storm.policy.IWaitStrategy;
-import org.apache.storm.policy.IWaitStrategy.WAIT_SITUATION;
+import org.apache.storm.policy.IWaitStrategy.WaitSituation;
 import org.apache.storm.shade.com.google.common.collect.ImmutableMap;
 import org.apache.storm.spout.ISpout;
 import org.apache.storm.spout.SpoutOutputCollector;
@@ -75,9 +75,9 @@ public class SpoutExecutor extends Executor {
     public SpoutExecutor(final WorkerState workerData, final List<Long> executorId, Map<String, String> credentials) {
         super(workerData, executorId, credentials, ClientStatsUtil.SPOUT);
         this.spoutWaitStrategy = ReflectionUtils.newInstance((String) topoConf.get(Config.TOPOLOGY_SPOUT_WAIT_STRATEGY));
-        this.spoutWaitStrategy.prepare(topoConf, WAIT_SITUATION.SPOUT_WAIT);
+        this.spoutWaitStrategy.prepare(topoConf, WaitSituation.SPOUT_WAIT);
         this.backPressureWaitStrategy = ReflectionUtils.newInstance((String) topoConf.get(Config.TOPOLOGY_BACKPRESSURE_WAIT_STRATEGY));
-        this.backPressureWaitStrategy.prepare(topoConf, WAIT_SITUATION.BACK_PRESSURE_WAIT);
+        this.backPressureWaitStrategy.prepare(topoConf, WaitSituation.BACK_PRESSURE_WAIT);
 
         this.lastActive = new AtomicBoolean(false);
         this.hasAckers = StormCommon.hasAckers(topoConf);
diff --git a/storm-client/src/jvm/org/apache/storm/executor/spout/SpoutOutputCollectorImpl.java b/storm-client/src/jvm/org/apache/storm/executor/spout/SpoutOutputCollectorImpl.java
index b71b331..c370d6a 100644
--- a/storm-client/src/jvm/org/apache/storm/executor/spout/SpoutOutputCollectorImpl.java
+++ b/storm-client/src/jvm/org/apache/storm/executor/spout/SpoutOutputCollectorImpl.java
@@ -46,7 +46,7 @@ public class SpoutOutputCollectorImpl implements ISpoutOutputCollector {
     private final RotatingMap<Long, TupleInfo> pending;
     private final long spoutExecutorThdId;
     private TupleInfo globalTupleInfo = new TupleInfo();
-        // thread safety: assumes Collector.emit*() calls are externally synchronized (if needed).
+    // thread safety: assumes Collector.emit*() calls are externally synchronized (if needed).
 
     @SuppressWarnings("unused")
     public SpoutOutputCollectorImpl(ISpout spout, SpoutExecutor executor, Task taskData,
@@ -144,7 +144,6 @@ public class SpoutOutputCollectorImpl implements ISpoutOutputCollector {
         }
 
         if (needAck) {
-            boolean sample = executor.samplerCheck();
             TupleInfo info = new TupleInfo();
             info.setTaskId(this.taskId);
             info.setStream(stream);
@@ -153,6 +152,7 @@ public class SpoutOutputCollectorImpl implements ISpoutOutputCollector {
             if (isDebug) {
                 info.setValues(values);
             }
+            boolean sample = executor.samplerCheck();
             if (sample) {
                 info.setTimestamp(System.currentTimeMillis());
             }
@@ -164,8 +164,8 @@ public class SpoutOutputCollectorImpl implements ISpoutOutputCollector {
             // Reusing TupleInfo object as we directly call executor.ackSpoutMsg() & are not sending msgs. perf critical
             if (isDebug) {
                 if (spoutExecutorThdId != Thread.currentThread().getId()) {
-                    throw new RuntimeException("Detected background thread emitting tuples for the spout. " +
-                                               "Spout Output Collector should only emit from the main spout executor thread.");
+                    throw new RuntimeException("Detected background thread emitting tuples for the spout. "
+                            + "Spout Output Collector should only emit from the main spout executor thread.");
                 }
             }
             globalTupleInfo.clear();
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/CustomStreamGrouping.java b/storm-client/src/jvm/org/apache/storm/grouping/CustomStreamGrouping.java
index eb532f7..9630b21 100644
--- a/storm-client/src/jvm/org/apache/storm/grouping/CustomStreamGrouping.java
+++ b/storm-client/src/jvm/org/apache/storm/grouping/CustomStreamGrouping.java
@@ -23,7 +23,7 @@ public interface CustomStreamGrouping extends Serializable {
      * Tells the stream grouping at runtime the tasks in the target bolt. This information should be used in chooseTasks to determine the
      * target tasks.
      *
-     * It also tells the grouping the metadata on the stream this grouping will be used on.
+     * <p>It also tells the grouping the metadata on the stream this grouping will be used on.
      */
     void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks);
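
Since this hunk touches the CustomStreamGrouping contract, a tiny implementation may help: prepare() hands the grouping its target task ids once, and chooseTasks() picks destinations per emitted tuple. A hedged round-robin sketch against the interface shape shown above (the parameter types are stubbed with Object so the snippet compiles without Storm on the classpath):

    import java.util.Collections;
    import java.util.List;

    // Illustrative only; the real class would implement CustomStreamGrouping.
    public class RoundRobinGrouping {
        private List<Integer> targetTasks;
        private int next = 0;

        public void prepare(Object context, Object stream, List<Integer> targetTasks) {
            this.targetTasks = targetTasks; // remember the candidate tasks once
        }

        public List<Integer> chooseTasks(int taskId, List<Object> values) {
            int chosen = targetTasks.get(next);
            next = (next + 1) % targetTasks.size(); // rotate through targets
            return Collections.singletonList(chosen);
        }

        public static void main(String[] args) {
            RoundRobinGrouping g = new RoundRobinGrouping();
            g.prepare(null, null, List.of(3, 4, 5));
            System.out.println(g.chooseTasks(1, List.of("a"))); // [3]
            System.out.println(g.chooseTasks(1, List.of("b"))); // [4]
        }
    }
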
 
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/Load.java b/storm-client/src/jvm/org/apache/storm/grouping/Load.java
index 2c6662d..a809267 100644
--- a/storm-client/src/jvm/org/apache/storm/grouping/Load.java
+++ b/storm-client/src/jvm/org/apache/storm/grouping/Load.java
@@ -21,7 +21,7 @@ public class Load {
     private double connectionLoad = 0.0; //0 no load to 1 fully loaded
 
     /**
-     * Create a new load
+     * Create a new load.
      *
      * @param hasMetrics     have metrics been reported yet?
      * @param boltLoad       the load as reported by the bolt 0.0 no load 1.0 fully loaded
@@ -34,6 +34,7 @@ public class Load {
     }
 
     /**
+     * Check whether metrics have been reported.
      * @return true if metrics have been reported so far.
      */
     public boolean hasMetrics() {
@@ -41,6 +42,7 @@ public class Load {
     }
 
     /**
+     * Get bolt load.
      * @return the load as reported by the bolt.
      */
     public double getBoltLoad() {
@@ -48,6 +50,7 @@ public class Load {
     }
 
     /**
+     * Get connection load.
      * @return the load as reported by the connection
      */
     public double getConnectionLoad() {
@@ -55,6 +58,7 @@ public class Load {
     }
 
     /**
+     * Get load.
      * @return the load that is a combination of sub loads.
      */
     public double getLoad() {
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/LoadMapping.java b/storm-client/src/jvm/org/apache/storm/grouping/LoadMapping.java
index 40fcd99..569d1c4 100644
--- a/storm-client/src/jvm/org/apache/storm/grouping/LoadMapping.java
+++ b/storm-client/src/jvm/org/apache/storm/grouping/LoadMapping.java
@@ -17,12 +17,12 @@ import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
 
 /**
- * Holds a list of the current loads
+ * Holds a list of the current loads.
  */
 public class LoadMapping {
     private static final Load NOT_CONNECTED = new Load(false, 1.0, 1.0);
-    private final AtomicReference<Map<Integer, Load>> _local = new AtomicReference<Map<Integer, Load>>(new HashMap<Integer, Load>());
-    private final AtomicReference<Map<Integer, Load>> _remote = new AtomicReference<Map<Integer, Load>>(new HashMap<Integer, Load>());
+    private final AtomicReference<Map<Integer, Load>> local = new AtomicReference<Map<Integer, Load>>(new HashMap<Integer, Load>());
+    private final AtomicReference<Map<Integer, Load>> remote = new AtomicReference<Map<Integer, Load>>(new HashMap<Integer, Load>());
 
     public void setLocal(Map<Integer, Double> local) {
         Map<Integer, Load> newLocal = new HashMap<Integer, Load>();
@@ -31,21 +31,21 @@ public class LoadMapping {
                 newLocal.put(entry.getKey(), new Load(true, entry.getValue(), 0.0));
             }
         }
-        _local.set(newLocal);
+        this.local.set(newLocal);
     }
 
     public void setRemote(Map<Integer, Load> remote) {
         if (remote != null) {
-            _remote.set(new HashMap<Integer, Load>(remote));
+            this.remote.set(new HashMap<Integer, Load>(remote));
         } else {
-            _remote.set(new HashMap<Integer, Load>());
+            this.remote.set(new HashMap<Integer, Load>());
         }
     }
 
     public Load getLoad(int task) {
-        Load ret = _local.get().get(task);
+        Load ret = local.get().get(task);
         if (ret == null) {
-            ret = _remote.get().get(task);
+            ret = remote.get().get(task);
         }
         if (ret == null) {
             ret = NOT_CONNECTED;
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/PartialKeyGrouping.java b/storm-client/src/jvm/org/apache/storm/grouping/PartialKeyGrouping.java
index bba635e..d61494c 100644
--- a/storm-client/src/jvm/org/apache/storm/grouping/PartialKeyGrouping.java
+++ b/storm-client/src/jvm/org/apache/storm/grouping/PartialKeyGrouping.java
@@ -25,14 +25,14 @@ import org.apache.storm.task.WorkerTopologyContext;
 import org.apache.storm.tuple.Fields;
 
 /**
- * A variation on FieldGrouping. This grouping operates on a partitioning of the incoming tuples (like a FieldGrouping), but it can send
- * Tuples from a given partition to multiple downstream tasks.
+ * A variation on FieldGrouping. This grouping operates on a partitioning of the incoming tuples (like a FieldGrouping),
+ * but it can send Tuples from a given partition to multiple downstream tasks.
  *
- * Given a total pool of target tasks, this grouping will always send Tuples with a given key to one member of a subset of those tasks. Each
- * key is assigned a subset of tasks. Each tuple is then sent to one task from that subset.
+ * <p>Given a total pool of target tasks, this grouping will always send Tuples with a given key to one member of a
+ * subset of those tasks. Each key is assigned a subset of tasks. Each tuple is then sent to one task from that subset.
  *
- * Notes: - the default TaskSelector ensures each task gets as close to a balanced number of Tuples as possible - the default
- * AssignmentCreator hashes the key and produces an assignment of two tasks
+ * <p>Notes: - the default TaskSelector ensures each task gets as close to a balanced number of Tuples as possible - the
+ * default AssignmentCreator hashes the key and produces an assignment of two tasks
  */
 public class PartialKeyGrouping implements CustomStreamGrouping, Serializable {
     private static final long serialVersionUID = -1672360572274911808L;
@@ -133,8 +133,8 @@ public class PartialKeyGrouping implements CustomStreamGrouping, Serializable {
     /**
      * This interface is responsible for choosing a subset of the target tasks to use for a given key.
      *
-     * NOTE: whatever scheme you use to create the assignment should be deterministic. This may be executed on multiple Storm Workers, thus
-     * each of them needs to come up with the same assignment for a given key.
+     * <p>NOTE: whatever scheme you use to create the assignment should be deterministic. This may be executed on
+     * multiple Storm Workers, thus each of them needs to come up with the same assignment for a given key.
      */
     public interface AssignmentCreator extends Serializable {
         int[] createAssignment(List<Integer> targetTasks, byte[] key);
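
The reflowed note above is worth underlining: AssignmentCreator must be deterministic because every worker recomputes the assignment independently and all of them must agree. A hedged sketch of a hash-based creator in the spirit of the documented default (the real default's hash function is not shown in this hunk):

    import java.util.Arrays;
    import java.util.List;

    public class TwoTaskAssignment {
        // Deterministic: the same key and task list yield the same pair on every worker.
        static int[] createAssignment(List<Integer> targetTasks, byte[] key) {
            int h = Arrays.hashCode(key);
            int first = Math.floorMod(h, targetTasks.size());
            int second = Math.floorMod(h + 1, targetTasks.size()); // adjacent slot
            return new int[]{ targetTasks.get(first), targetTasks.get(second) };
        }

        public static void main(String[] args) {
            List<Integer> tasks = Arrays.asList(3, 4, 5, 6);
            byte[] key = "user-42".getBytes();
            System.out.println(Arrays.toString(createAssignment(tasks, key)));
            System.out.println(Arrays.toString(createAssignment(tasks, key))); // identical
        }
    }
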
diff --git a/storm-client/src/jvm/org/apache/storm/hooks/BaseWorkerHook.java b/storm-client/src/jvm/org/apache/storm/hooks/BaseWorkerHook.java
index 2915f5c..0539ab0 100644
--- a/storm-client/src/jvm/org/apache/storm/hooks/BaseWorkerHook.java
+++ b/storm-client/src/jvm/org/apache/storm/hooks/BaseWorkerHook.java
@@ -17,14 +17,14 @@ import java.util.Map;
 import org.apache.storm.task.WorkerTopologyContext;
 
 /**
- * A BaseWorkerHook is a noop implementation of IWorkerHook. You may extends this class and implement any and/or all methods you need for
- * your workers.
+ * A BaseWorkerHook is a noop implementation of IWorkerHook. You may extend this class and implement any or all
+ * methods you need for your workers.
  */
 public class BaseWorkerHook implements IWorkerHook, Serializable {
     private static final long serialVersionUID = 2589466485198339529L;
 
     /**
-     * This method is called when a worker is started
+     * This method is called when a worker is started.
      *
      * @param topoConf The Storm configuration for this worker
      * @param context  This object can be used to get information about this worker's place within the topology
@@ -35,7 +35,7 @@ public class BaseWorkerHook implements IWorkerHook, Serializable {
     }
 
     /**
-     * This method is called right before a worker shuts down
+     * This method is called right before a worker shuts down.
      */
     @Override
     public void shutdown() {
diff --git a/storm-client/src/jvm/org/apache/storm/hooks/IWorkerHook.java b/storm-client/src/jvm/org/apache/storm/hooks/IWorkerHook.java
index a0ec059..fe68bcc 100644
--- a/storm-client/src/jvm/org/apache/storm/hooks/IWorkerHook.java
+++ b/storm-client/src/jvm/org/apache/storm/hooks/IWorkerHook.java
@@ -22,7 +22,7 @@ import org.apache.storm.task.WorkerTopologyContext;
  */
 public interface IWorkerHook extends Serializable {
     /**
-     * This method is called when a worker is started
+     * This method is called when a worker is started.
      *
      * @param topoConf The Storm configuration for this worker
      * @param context  This object can be used to get information about this worker's place within the topology
@@ -30,7 +30,7 @@ public interface IWorkerHook extends Serializable {
     void start(Map<String, Object> topoConf, WorkerTopologyContext context);
 
     /**
-     * This method is called right before a worker shuts down
+     * This method is called right before a worker shuts down.
      */
     void shutdown();
 }
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/ConnectionWithStatus.java b/storm-client/src/jvm/org/apache/storm/messaging/ConnectionWithStatus.java
index 818a9a4..da20253 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/ConnectionWithStatus.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/ConnectionWithStatus.java
@@ -15,7 +15,7 @@ package org.apache.storm.messaging;
 public abstract class ConnectionWithStatus implements IConnection {
 
     /**
-     * whether this connection is available to transfer data
+     * whether this connection is available to transfer data.
      */
     public abstract Status status();
 
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/DeserializingConnectionCallback.java b/storm-client/src/jvm/org/apache/storm/messaging/DeserializingConnectionCallback.java
index 8ab52fd..b038e02 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/DeserializingConnectionCallback.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/DeserializingConnectionCallback.java
@@ -36,7 +36,7 @@ public class DeserializingConnectionCallback implements IConnectionCallback, IMe
     private final Map<String, Object> conf;
     private final GeneralTopologyContext context;
 
-    private final ThreadLocal<KryoTupleDeserializer> _des =
+    private final ThreadLocal<KryoTupleDeserializer> des =
         new ThreadLocal<KryoTupleDeserializer>() {
             @Override
             protected KryoTupleDeserializer initialValue() {
@@ -60,7 +60,7 @@ public class DeserializingConnectionCallback implements IConnectionCallback, IMe
 
     @Override
     public void recv(List<TaskMessage> batch) {
-        KryoTupleDeserializer des = _des.get();
+        KryoTupleDeserializer des = this.des.get();
         ArrayList<AddressedTuple> ret = new ArrayList<>(batch.size());
         for (TaskMessage message : batch) {
             Tuple tuple = des.deserialize(message.message());
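
The rename above keeps the important idiom intact: Kryo deserializers are not thread-safe, so the callback holds one per thread via a ThreadLocal with an overridden initialValue(). A generic sketch of that pattern (SimpleDateFormat is just a familiar non-thread-safe stand-in for the deserializer):

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class PerThreadInstance {
        // One instance per thread, created lazily on that thread's first get().
        private static final ThreadLocal<SimpleDateFormat> FORMAT =
            ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy-MM-dd"));

        public static void main(String[] args) throws InterruptedException {
            Runnable task = () ->
                System.out.println(Thread.currentThread().getName()
                    + " -> " + FORMAT.get().format(new Date()));
            Thread a = new Thread(task);
            Thread b = new Thread(task); // each thread gets its own instance
            a.start();
            b.start();
            a.join();
            b.join();
        }
    }
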
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/IConnection.java b/storm-client/src/jvm/org/apache/storm/messaging/IConnection.java
index f713c7f..11e98ac 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/IConnection.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/IConnection.java
@@ -34,15 +34,13 @@ public interface IConnection extends AutoCloseable {
     void sendBackPressureStatus(BackPressureStatus bpStatus);
 
     /**
-     * send batch messages
-     *
-     * @param msgs
+     * send batch messages.
      */
 
     void send(Iterator<TaskMessage> msgs);
 
     /**
-     * Get the current load for the given tasks
+     * Get the current load for the given tasks.
      *
      * @param tasks the tasks to look for.
      * @return a Load for each of the tasks it knows about.
@@ -50,14 +48,14 @@ public interface IConnection extends AutoCloseable {
     Map<Integer, Load> getLoad(Collection<Integer> tasks);
 
     /**
-     * Get the port for this connection
+     * Get the port for this connection.
      *
      * @return The port this connection is using
      */
     int getPort();
 
     /**
-     * close this connection
+     * close this connection.
      */
     @Override
     void close();
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/IConnectionCallback.java b/storm-client/src/jvm/org/apache/storm/messaging/IConnectionCallback.java
index d9b3f7b..5719eb8 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/IConnectionCallback.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/IConnectionCallback.java
@@ -19,7 +19,7 @@ import java.util.List;
  */
 public interface IConnectionCallback {
     /**
-     * A batch of new messages have arrived to be processed
+     * A batch of new messages has arrived to be processed.
      *
      * @param batch the messages to be processed
      */
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/IContext.java b/storm-client/src/jvm/org/apache/storm/messaging/IContext.java
index 057ae30..ac56a8a 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/IContext.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/IContext.java
@@ -19,44 +19,44 @@ import java.util.function.Supplier;
 /**
  * This interface needs to be implemented for messaging plugin.
  *
- * Messaging plugin is specified via Storm config parameter, storm.messaging.transport.
+ * <p>Messaging plugin is specified via Storm config parameter, storm.messaging.transport.
  *
- * A messaging plugin should have a default constructor and implements IContext interface. Upon construction, we will invoke
+ * <p>A messaging plugin should have a default constructor and implements IContext interface. Upon construction, we will invoke
  * IContext::prepare(topoConf) to enable context to be configured according to storm configuration.
  */
 public interface IContext {
     /**
-     * This method is invoked at the startup of messaging plugin
+     * This method is invoked at the startup of messaging plugin.
      *
      * @param topoConf storm configuration
      */
     void prepare(Map<String, Object> topoConf);
 
     /**
-     * This method is invoked when a worker is unloading a messaging plugin
+     * This method is invoked when a worker is unloading a messaging plugin.
      */
     void term();
 
     /**
-     * This method establishes a server side connection
+     * This method establishes a server side connection.
      *
-     * @param storm_id topology ID
+     * @param stormId topology ID
      * @param port     port #
      * @param cb The callback to deliver received messages to
      * @param newConnectionResponse Supplier of the initial message to send to new client connections
      * @return server side connection
      */
-    IConnection bind(String storm_id, int port, IConnectionCallback cb, Supplier<Object> newConnectionResponse);
+    IConnection bind(String stormId, int port, IConnectionCallback cb, Supplier<Object> newConnectionResponse);
 
     /**
      * This method establishes a client side connection to a remote server;
-     * implementation should return a new connection every call
+     * implementation should return a new connection every call.
      *
-     * @param storm_id       topology ID
+     * @param stormId       topology ID
      * @param host           remote host
      * @param port           remote port
      * @param remoteBpStatus array of booleans reflecting Back Pressure status of remote tasks.
      * @return client side connection
      */
-    IConnection connect(String storm_id, String host, int port, AtomicBoolean[] remoteBpStatus);
+    IConnection connect(String stormId, String host, int port, AtomicBoolean[] remoteBpStatus);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/TaskMessage.java b/storm-client/src/jvm/org/apache/storm/messaging/TaskMessage.java
index c32a93b..9c2975d 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/TaskMessage.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/TaskMessage.java
@@ -15,26 +15,26 @@ package org.apache.storm.messaging;
 import java.nio.ByteBuffer;
 
 public class TaskMessage {
-    private int _task;
-    private byte[] _message;
+    private int task;
+    private byte[] message;
 
     public TaskMessage(int task, byte[] message) {
-        _task = task;
-        _message = message;
+        this.task = task;
+        this.message = message;
     }
 
     public int task() {
-        return _task;
+        return task;
     }
 
     public byte[] message() {
-        return _message;
+        return message;
     }
 
     public ByteBuffer serialize() {
-        ByteBuffer bb = ByteBuffer.allocate(_message.length + 2);
-        bb.putShort((short) _task);
-        bb.put(_message);
+        ByteBuffer bb = ByteBuffer.allocate(message.length + 2);
+        bb.putShort((short) task);
+        bb.put(message);
         return bb;
     }
 
@@ -42,9 +42,9 @@ public class TaskMessage {
         if (packet == null) {
             return;
         }
-        _task = packet.getShort();
-        _message = new byte[packet.limit() - 2];
-        packet.get(_message);
+        task = packet.getShort();
+        message = new byte[packet.limit() - 2];
+        packet.get(message);
     }
 
 }
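
The wire format above is simply a 2-byte task id followed by the raw payload, which is
why serialize() allocates message.length + 2. A hedged round-trip sketch, assuming the
method body in the last hunk belongs to TaskMessage.deserialize(ByteBuffer), as in the
surrounding codebase:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.apache.storm.messaging.TaskMessage;

    // Hypothetical example class, not part of Storm.
    public class TaskMessageRoundTrip {
        public static void main(String[] args) {
            TaskMessage out = new TaskMessage(7, "hello".getBytes(StandardCharsets.UTF_8));
            ByteBuffer wire = out.serialize();
            wire.flip(); // serialize() leaves the position at the end; flip before reading

            TaskMessage in = new TaskMessage(0, null);
            in.deserialize(wire); // reads the short task id, then limit() - 2 payload bytes
            System.out.println(in.task()); // prints 7
        }
    }

Note the short cast: task ids above Short.MAX_VALUE would not survive this encoding,
which is why MessageBatch.writeTaskMessage() rejects them.
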
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/TransportFactory.java b/storm-client/src/jvm/org/apache/storm/messaging/TransportFactory.java
index 9d3ec72..cc48eca 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/TransportFactory.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/TransportFactory.java
@@ -24,13 +24,13 @@ public class TransportFactory {
     public static IContext makeContext(Map<String, Object> topoConf) {
 
         //get factory class name
-        String transport_plugin_klassName = (String) topoConf.get(Config.STORM_MESSAGING_TRANSPORT);
-        LOG.info("Storm peer transport plugin:" + transport_plugin_klassName);
+        String transportPluginClassName = (String) topoConf.get(Config.STORM_MESSAGING_TRANSPORT);
+        LOG.info("Storm peer transport plugin:" + transportPluginClassName);
 
         IContext transport;
         try {
             //create a factory class
-            Class klass = Class.forName(transport_plugin_klassName);
+            Class klass = Class.forName(transportPluginClassName);
             //obtain a context object
             Object obj = klass.newInstance();
             if (obj instanceof IContext) {
@@ -45,7 +45,7 @@ public class TransportFactory {
                 transport = (IContext) method.invoke(obj, topoConf);
             }
         } catch (Exception e) {
-            throw new RuntimeException("Fail to construct messaging plugin from plugin " + transport_plugin_klassName, e);
+            throw new RuntimeException("Fail to construct messaging plugin from plugin " + transportPluginClassName, e);
         }
         return transport;
     }
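
makeContext() is a reflective factory keyed off Config.STORM_MESSAGING_TRANSPORT. A
hedged usage sketch; it picks the local transport on the assumption that its prepare()
needs nothing further from the conf, which the Netty transport would:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.storm.Config;
    import org.apache.storm.messaging.IContext;
    import org.apache.storm.messaging.TransportFactory;

    // Hypothetical example class, not part of Storm.
    public class TransportFactoryExample {
        public static void main(String[] args) {
            Map<String, Object> topoConf = new HashMap<>();
            // the factory instantiates this class by name and configures it from topoConf
            topoConf.put(Config.STORM_MESSAGING_TRANSPORT,
                         "org.apache.storm.messaging.local.Context");
            IContext context = TransportFactory.makeContext(topoConf);
            context.term();
        }
    }
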
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/local/Context.java b/storm-client/src/jvm/org/apache/storm/messaging/local/Context.java
index 2737dfb..69552f8 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/local/Context.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/local/Context.java
@@ -13,7 +13,6 @@
 package org.apache.storm.messaging.local;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -38,7 +37,7 @@ import org.slf4j.LoggerFactory;
 
 public class Context implements IContext {
     private static final Logger LOG = LoggerFactory.getLogger(Context.class);
-    private final ConcurrentHashMap<String, LocalServer> _registry = new ConcurrentHashMap<>();
+    private final ConcurrentHashMap<String, LocalServer> registry = new ConcurrentHashMap<>();
 
     private static String getNodeKey(String nodeId, int port) {
         return nodeId + "-" + port;
@@ -47,7 +46,7 @@ public class Context implements IContext {
     private LocalServer createLocalServer(String nodeId, int port, IConnectionCallback cb) {
         String key = getNodeKey(nodeId, port);
         LocalServer ret = new LocalServer(port, cb);
-        LocalServer existing = _registry.put(key, ret);
+        LocalServer existing = registry.put(key, ret);
         if (existing != null) {
             //Can happen if worker is restarted in the same topology, e.g. due to blob update
             LOG.info("Replacing existing server for key {}", existing, ret, key);
@@ -61,13 +60,13 @@ public class Context implements IContext {
     }
 
     @Override
-    public IConnection bind(String storm_id, int port, IConnectionCallback cb, Supplier<Object> newConnectionResponse) {
-        return createLocalServer(storm_id, port, cb);
+    public IConnection bind(String stormId, int port, IConnectionCallback cb, Supplier<Object> newConnectionResponse) {
+        return createLocalServer(stormId, port, cb);
     }
 
     @Override
-    public IConnection connect(String storm_id, String host, int port, AtomicBoolean[] remoteBpStatus) {
-        return new LocalClient(storm_id, port);
+    public IConnection connect(String stormId, String host, int port, AtomicBoolean[] remoteBpStatus) {
+        return new LocalClient(stormId, port);
     }
 
     @Override
@@ -76,13 +75,13 @@ public class Context implements IContext {
     }
 
     private class LocalServer implements IConnection {
-        final ConcurrentHashMap<Integer, Double> _load = new ConcurrentHashMap<>();
+        final ConcurrentHashMap<Integer, Double> load = new ConcurrentHashMap<>();
         final int port;
-        final IConnectionCallback _cb;
+        final IConnectionCallback cb;
 
         public LocalServer(int port, IConnectionCallback cb) {
             this.port = port;
-            this._cb = cb;
+            this.cb = cb;
         }
         
         @Override
@@ -94,7 +93,7 @@ public class Context implements IContext {
         public Map<Integer, Load> getLoad(Collection<Integer> tasks) {
             Map<Integer, Load> ret = new HashMap<>();
             for (Integer task : tasks) {
-                Double found = _load.get(task);
+                Double found = load.get(task);
                 if (found != null) {
                     ret.put(task, new Load(true, found, 0));
                 }
@@ -104,7 +103,7 @@ public class Context implements IContext {
 
         @Override
         public void sendLoadMetrics(Map<Integer, Double> taskToLoad) {
-            _load.putAll(taskToLoad);
+            load.putAll(taskToLoad);
         }
 
         @Override
@@ -125,16 +124,16 @@ public class Context implements IContext {
 
     private class LocalClient implements IConnection {
         //Messages sent before the server registered a callback
-        private final LinkedBlockingQueue<TaskMessage> _pendingDueToUnregisteredServer;
-        private final ScheduledExecutorService _pendingFlusher;
+        private final LinkedBlockingQueue<TaskMessage> pendingDueToUnregisteredServer;
+        private final ScheduledExecutorService pendingFlusher;
         private final int port;
         private final String registryKey;
 
         public LocalClient(String stormId, int port) {
             this.port = port;
             this.registryKey = getNodeKey(stormId, port);
-            _pendingDueToUnregisteredServer = new LinkedBlockingQueue<>();
-            _pendingFlusher = Executors.newScheduledThreadPool(1, new ThreadFactory() {
+            pendingDueToUnregisteredServer = new LinkedBlockingQueue<>();
+            pendingFlusher = Executors.newScheduledThreadPool(1, new ThreadFactory() {
                 @Override
                 public Thread newThread(Runnable runnable) {
                     Thread thread = new Thread(runnable);
@@ -143,7 +142,7 @@ public class Context implements IContext {
                     return thread;
                 }
             });
-            _pendingFlusher.scheduleAtFixedRate(new Runnable() {
+            pendingFlusher.scheduleAtFixedRate(new Runnable() {
                 @Override
                 public void run() {
                     try {
@@ -159,34 +158,34 @@ public class Context implements IContext {
 
         private void flushPending() {
             //Can't cache server in client, server can change when workers restart.
-            LocalServer server = _registry.get(registryKey);
-            if (server != null && !_pendingDueToUnregisteredServer.isEmpty()) {
+            LocalServer server = registry.get(registryKey);
+            if (server != null && !pendingDueToUnregisteredServer.isEmpty()) {
                 ArrayList<TaskMessage> ret = new ArrayList<>();
-                _pendingDueToUnregisteredServer.drainTo(ret);
-                server._cb.recv(ret);
+                pendingDueToUnregisteredServer.drainTo(ret);
+                server.cb.recv(ret);
             }
         }
 
         @Override
         public void send(Iterator<TaskMessage> msgs) {
-            LocalServer server = _registry.get(registryKey);
+            LocalServer server = registry.get(registryKey);
             if (server != null) {
                 flushPending();
                 ArrayList<TaskMessage> ret = new ArrayList<>();
                 while (msgs.hasNext()) {
                     ret.add(msgs.next());
                 }
-                server._cb.recv(ret);
+                server.cb.recv(ret);
             } else {
                 while (msgs.hasNext()) {
-                    _pendingDueToUnregisteredServer.add(msgs.next());
+                    pendingDueToUnregisteredServer.add(msgs.next());
                 }
             }
         }
 
         @Override
         public Map<Integer, Load> getLoad(Collection<Integer> tasks) {
-            LocalServer server = _registry.get(registryKey);
+            LocalServer server = registry.get(registryKey);
             if (server != null) {
                 return server.getLoad(tasks);
             }
@@ -195,7 +194,7 @@ public class Context implements IContext {
 
         @Override
         public void sendLoadMetrics(Map<Integer, Double> taskToLoad) {
-            LocalServer server = _registry.get(registryKey);
+            LocalServer server = registry.get(registryKey);
             if (server != null) {
                 server.sendLoadMetrics(taskToLoad);
             }
@@ -213,9 +212,9 @@ public class Context implements IContext {
 
         @Override
         public void close() {
-            _pendingFlusher.shutdown();
+            pendingFlusher.shutdown();
             try {
-                _pendingFlusher.awaitTermination(5, TimeUnit.SECONDS);
+                pendingFlusher.awaitTermination(5, TimeUnit.SECONDS);
             } catch (InterruptedException e) {
                 throw new RuntimeException("Interrupted while awaiting flusher shutdown", e);
             }
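
The buffering behavior above is the interesting part of the local transport: a
LocalClient created before its server queues messages in pendingDueToUnregisteredServer,
and they are handed to the server's callback by the scheduled flusher or by the next
send() once bind() has registered the server. A hedged end-to-end sketch, assuming
IConnectionCallback is a single-method recv(List<TaskMessage>) interface:

    import java.util.Collections;
    import org.apache.storm.messaging.IConnection;
    import org.apache.storm.messaging.TaskMessage;
    import org.apache.storm.messaging.local.Context;

    // Hypothetical example class, not part of Storm.
    public class LocalTransportExample {
        public static void main(String[] args) {
            Context context = new Context();
            context.prepare(Collections.emptyMap());

            // client first: this message lands in the pending queue, not on a server
            IConnection client = context.connect("topo-1", "localhost", 6700, null);
            client.send(Collections.singletonList(new TaskMessage(1, new byte[0])).iterator());

            // now the server appears under the same node key
            context.bind("topo-1", 6700,
                    msgs -> System.out.println("received " + msgs.size() + " message(s)"), null);

            // this send flushes the backlog first, then delivers the new message
            client.send(Collections.singletonList(new TaskMessage(2, new byte[0])).iterator());
            context.term();
        }
    }
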
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/Client.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/Client.java
index e76f9d5..eac0f22 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/Client.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/Client.java
@@ -27,18 +27,14 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Supplier;
 import org.apache.storm.Config;
 import org.apache.storm.grouping.Load;
 import org.apache.storm.messaging.ConnectionWithStatus;
-import org.apache.storm.messaging.IConnectionCallback;
 import org.apache.storm.messaging.TaskMessage;
 import org.apache.storm.metric.api.IStatefulObject;
 import org.apache.storm.policy.IWaitStrategy;
-import org.apache.storm.policy.IWaitStrategy.WAIT_SITUATION;
+import org.apache.storm.policy.IWaitStrategy.WaitSituation;
 import org.apache.storm.policy.WaitStrategyProgressive;
-import org.apache.storm.serialization.KryoValuesDeserializer;
-import org.apache.storm.serialization.KryoValuesSerializer;
 import org.apache.storm.shade.io.netty.bootstrap.Bootstrap;
 import org.apache.storm.shade.io.netty.buffer.PooledByteBufAllocator;
 import org.apache.storm.shade.io.netty.channel.Channel;
@@ -166,7 +162,7 @@ public class Client extends ConnectionWithStatus implements IStatefulObject, ISa
         } else {
             waitStrategy = ReflectionUtils.newInstance(clazz);
         }
-        waitStrategy.prepare(topoConf, WAIT_SITUATION.BACK_PRESSURE_WAIT);
+        waitStrategy.prepare(topoConf, WaitSituation.BACK_PRESSURE_WAIT);
     }
 
     /**
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/Context.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/Context.java
index ca46c4f..03feaf8 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/Context.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/Context.java
@@ -34,7 +34,7 @@ public class Context implements IContext {
     private HashedWheelTimer clientScheduleService;
 
     /**
-     * initialization per Storm configuration
+     * initialization per Storm configuration.
      */
     @Override
     public void prepare(Map<String, Object> topoConf) {
@@ -52,26 +52,26 @@ public class Context implements IContext {
     }
 
     /**
-     * establish a server with a binding port
+     * establish a server with a binding port.
      */
     @Override
-    public synchronized IConnection bind(String storm_id, int port, IConnectionCallback cb, Supplier<Object> newConnectionResponse) {
+    public synchronized IConnection bind(String stormId, int port, IConnectionCallback cb, Supplier<Object> newConnectionResponse) {
         Server server = new Server(topoConf, port, cb, newConnectionResponse);
         serverConnections.add(server);
         return server;
     }
 
     /**
-     * establish a connection to a remote server
+     * establish a connection to a remote server.
      */
     @Override
-    public IConnection connect(String storm_id, String host, int port, AtomicBoolean[] remoteBpStatus) {
+    public IConnection connect(String stormId, String host, int port, AtomicBoolean[] remoteBpStatus) {
         return new Client(topoConf, remoteBpStatus, workerEventLoopGroup,
                                         clientScheduleService, host, port);
     }
 
     /**
-     * terminate this context
+     * terminate this context.
      */
     @Override
     public synchronized void term() {
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/ControlMessage.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/ControlMessage.java
index 3836faf..beb4515 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/ControlMessage.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/ControlMessage.java
@@ -31,6 +31,7 @@ public enum ControlMessage implements INettySerializable {
     }
 
     /**
+     * Create message.
      * @param encoded status code
      * @return a control message per an encoded status code
      */
@@ -44,11 +45,11 @@ public enum ControlMessage implements INettySerializable {
     }
 
     public static ControlMessage read(byte[] serial) {
-        ByteBuf cm_buffer = Unpooled.wrappedBuffer(serial);
-        try{
-        return mkMessage(cm_buffer.getShort(0));
+        ByteBuf cmBuffer = Unpooled.wrappedBuffer(serial);
+        try {
+            return mkMessage(cmBuffer.getShort(0));
         } finally {
-            cm_buffer.release();
+            cmBuffer.release();
         }
     }
 
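
A control message is nothing but its 2-byte status code, which is all the cleaned-up
read() above decodes. A hedged round trip, assuming write(ByteBuf) from
INettySerializable emits exactly that short:

    import org.apache.storm.messaging.netty.ControlMessage;
    import org.apache.storm.shade.io.netty.buffer.ByteBuf;
    import org.apache.storm.shade.io.netty.buffer.Unpooled;

    // Hypothetical example class, not part of Storm.
    public class ControlMessageRoundTrip {
        public static void main(String[] args) {
            ControlMessage eob = ControlMessage.EOB_MESSAGE;
            ByteBuf buf = Unpooled.buffer(eob.encodeLength()); // 2 bytes
            eob.write(buf);
            byte[] serial = new byte[buf.readableBytes()];
            buf.readBytes(serial);
            buf.release();
            System.out.println(ControlMessage.read(serial) == eob); // true
        }
    }
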
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslClientHandler.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslClientHandler.java
index 5edaf95..450dc69 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslClientHandler.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslClientHandler.java
@@ -24,22 +24,22 @@ public class KerberosSaslClientHandler extends ChannelInboundHandlerAdapter {
 
     private static final Logger LOG = LoggerFactory
         .getLogger(KerberosSaslClientHandler.class);
-    private final long start_time;
+    private final long startTime;
     private final ISaslClient client;
     /**
      * Used for client or server's token to send or receive from each other.
      */
     private final Map<String, Object> topoConf;
-    private final String jaas_section;
+    private final String jaasSection;
     private final String host;
 
-    public KerberosSaslClientHandler(ISaslClient client, Map<String, Object> topoConf, String jaas_section, String host) throws
+    public KerberosSaslClientHandler(ISaslClient client, Map<String, Object> topoConf, String jaasSection, String host) throws
         IOException {
         this.client = client;
         this.topoConf = topoConf;
-        this.jaas_section = jaas_section;
+        this.jaasSection = jaasSection;
         this.host = host;
-        start_time = System.currentTimeMillis();
+        startTime = System.currentTimeMillis();
     }
 
     @Override
@@ -56,7 +56,7 @@ public class KerberosSaslClientHandler extends ChannelInboundHandlerAdapter {
             if (saslNettyClient == null) {
                 LOG.debug("Creating saslNettyClient now for channel: {}",
                           channel);
-                saslNettyClient = new KerberosSaslNettyClient(topoConf, jaas_section, host);
+                saslNettyClient = new KerberosSaslNettyClient(topoConf, jaasSection, host);
                 channel.attr(KerberosSaslNettyClientState.KERBEROS_SASL_NETTY_CLIENT).set(saslNettyClient);
             }
             LOG.debug("Going to initiate Kerberos negotiations.");
@@ -71,7 +71,7 @@ public class KerberosSaslClientHandler extends ChannelInboundHandlerAdapter {
 
     @Override
     public void channelRead(ChannelHandlerContext ctx, Object message) throws Exception {
-        LOG.debug("send/recv time (ms): {}", (System.currentTimeMillis() - start_time));
+        LOG.debug("send/recv time (ms): {}", (System.currentTimeMillis() - startTime));
 
         // examine the response message from server
         if (message instanceof ControlMessage) {
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyClient.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyClient.java
index 352c5b3..c4d15fc 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyClient.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyClient.java
@@ -45,22 +45,22 @@ public class KerberosSaslNettyClient {
      */
     private SaslClient saslClient;
     private Subject subject;
-    private String jaas_section;
+    private String jaasSection;
 
     /**
      * Create a KerberosSaslNettyClient for authentication with servers.
      */
-    public KerberosSaslNettyClient(Map<String, Object> topoConf, String jaas_section, String host) {
+    public KerberosSaslNettyClient(Map<String, Object> topoConf, String jaasSection, String host) {
         LOG.debug("KerberosSaslNettyClient: Creating SASL {} client to authenticate to server ",
                   SaslUtils.KERBEROS);
 
         LOG.info("Creating Kerberos Client.");
 
-        Configuration login_conf;
+        Configuration loginConf;
         try {
-            login_conf = ClientAuthUtils.getConfiguration(topoConf);
+            loginConf = ClientAuthUtils.getConfiguration(topoConf);
         } catch (Throwable t) {
-            LOG.error("Failed to get login_conf: ", t);
+            LOG.error("Failed to get loginConf: ", t);
             throw t;
         }
         LOG.debug("KerberosSaslNettyClient: authmethod {}", SaslUtils.KERBEROS);
@@ -69,12 +69,12 @@ public class KerberosSaslNettyClient {
 
         subject = null;
         try {
-            LOG.debug("Setting Configuration to login_config: {}", login_conf);
+            LOG.debug("Setting Configuration to login_config: {}", loginConf);
             //specify a configuration object to be used
-            Configuration.setConfiguration(login_conf);
+            Configuration.setConfiguration(loginConf);
             //now login
             LOG.debug("Trying to login.");
-            Login login = new Login(jaas_section, ch);
+            Login login = new Login(jaasSection, ch);
             subject = login.getSubject();
             LOG.debug("Got Subject: {}", subject.toString());
         } catch (LoginException ex) {
@@ -85,15 +85,15 @@ public class KerberosSaslNettyClient {
         //check the credential of our principal
         if (subject.getPrivateCredentials(KerberosTicket.class).isEmpty()) {
             LOG.error("Failed to verify user principal.");
-            throw new RuntimeException("Fail to verify user principal with section \"" +
-                                       jaas_section +
-                                       "\" in login configuration file " +
-                                       login_conf);
+            throw new RuntimeException("Fail to verify user principal with section \""
+                    + jaasSection
+                    + "\" in login configuration file "
+                    + loginConf);
         }
 
         String serviceName = null;
         try {
-            serviceName = ClientAuthUtils.get(login_conf, jaas_section, "serviceName");
+            serviceName = ClientAuthUtils.get(loginConf, jaasSection, "serviceName");
         } catch (IOException e) {
             LOG.error("Failed to get service name.", e);
             throw new RuntimeException(e);
@@ -176,7 +176,6 @@ public class KerberosSaslNettyClient {
          * Implementation used to respond to SASL tokens from server.
          *
          * @param callbacks objects that indicate what credential information the server's SaslServer requires from the client.
-         * @throws UnsupportedCallbackException
          */
         @Override
         public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyClientState.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyClientState.java
index 29746ff..6c923b5 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyClientState.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyClientState.java
@@ -16,6 +16,7 @@ import org.apache.storm.shade.io.netty.util.AttributeKey;
 
 final class KerberosSaslNettyClientState {
 
-    public static final AttributeKey<KerberosSaslNettyClient> KERBEROS_SASL_NETTY_CLIENT = AttributeKey.valueOf("kerberos.sasl.netty.client");
+    public static final AttributeKey<KerberosSaslNettyClient> KERBEROS_SASL_NETTY_CLIENT =
+            AttributeKey.valueOf("kerberos.sasl.netty.client");
 
 }
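
The reformatted constant above is the per-channel state pattern used throughout these
handlers: the SASL client is parked on the Netty channel under an AttributeKey and
fetched back on every read (see channelRead in KerberosSaslClientHandler). A hedged
in-package sketch, since the state holder is package-private:

    import org.apache.storm.shade.io.netty.channel.Channel;

    // Hypothetical in-package example, not part of Storm.
    final class SaslAttributeExample {
        static KerberosSaslNettyClient clientFor(Channel channel) {
            // null on first contact; the handler then creates and attaches one
            return channel.attr(KerberosSaslNettyClientState.KERBEROS_SASL_NETTY_CLIENT).get();
        }
    }
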
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServer.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServer.java
index 466ee6d..e7f0c14 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServer.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServer.java
@@ -47,14 +47,14 @@ class KerberosSaslNettyServer {
     private Subject subject;
     private List<String> authorizedUsers;
 
-    KerberosSaslNettyServer(Map<String, Object> topoConf, String jaas_section, List<String> authorizedUsers) {
+    KerberosSaslNettyServer(Map<String, Object> topoConf, String jaasSection, List<String> authorizedUsers) {
         this.authorizedUsers = authorizedUsers;
         LOG.debug("Getting Configuration.");
-        Configuration login_conf;
+        Configuration loginConf;
         try {
-            login_conf = ClientAuthUtils.getConfiguration(topoConf);
+            loginConf = ClientAuthUtils.getConfiguration(topoConf);
         } catch (Throwable t) {
-            LOG.error("Failed to get login_conf: ", t);
+            LOG.error("Failed to get loginConf: ", t);
             throw t;
         }
 
@@ -65,12 +65,12 @@ class KerberosSaslNettyServer {
         //login our principal
         subject = null;
         try {
-            LOG.debug("Setting Configuration to login_config: {}", login_conf);
+            LOG.debug("Setting Configuration to login_config: {}", loginConf);
             //specify a configuration object to be used
-            Configuration.setConfiguration(login_conf);
+            Configuration.setConfiguration(loginConf);
             //now login
             LOG.debug("Trying to login.");
-            Login login = new Login(jaas_section, ch);
+            Login login = new Login(jaasSection, ch);
             subject = login.getSubject();
             LOG.debug("Got Subject: {}", subject.toString());
         } catch (LoginException ex) {
@@ -82,19 +82,19 @@ class KerberosSaslNettyServer {
         if (subject.getPrivateCredentials(KerberosTicket.class).isEmpty()) {
             LOG.error("Failed to verifyuser principal.");
             throw new RuntimeException("Fail to verify user principal with section \""
-                                       + jaas_section
+                                       + jaasSection
                                        + "\" in login configuration file "
-                                       + login_conf);
+                                       + loginConf);
         }
 
         try {
             LOG.info("Creating Kerberos Server.");
             final CallbackHandler fch = ch;
             Principal p = (Principal) subject.getPrincipals().toArray()[0];
-            KerberosName kName = new KerberosName(p.getName());
-            final String fHost = kName.getHostName();
-            final String fServiceName = kName.getServiceName();
-            LOG.debug("Server with host: {}", fHost);
+            KerberosName kerberosName = new KerberosName(p.getName());
+            final String hostName = kerberosName.getHostName();
+            final String serviceName = kerberosName.getServiceName();
+            LOG.debug("Server with host: {}", hostName);
             saslServer =
                 Subject.doAs(subject, new PrivilegedExceptionAction<SaslServer>() {
                     @Override
@@ -104,8 +104,8 @@ class KerberosSaslNettyServer {
                             props.put(Sasl.QOP, "auth");
                             props.put(Sasl.SERVER_AUTH, "false");
                             return Sasl.createSaslServer(SaslUtils.KERBEROS,
-                                                         fServiceName,
-                                                         fHost, props, fch);
+                                                         serviceName,
+                                                         hostName, props, fch);
                         } catch (Exception e) {
                             LOG.error("Subject failed to create sasl server.", e);
                             return null;
@@ -159,12 +159,12 @@ class KerberosSaslNettyServer {
     }
 
     /**
-     * CallbackHandler for SASL DIGEST-MD5 mechanism
+     * CallbackHandler for SASL DIGEST-MD5 mechanism.
      */
     public static class KerberosSaslCallbackHandler implements CallbackHandler {
 
         /**
-         * Used to authenticate the clients
+         * Used to authenticate the clients.
          */
         private List<String> authorizedUsers;
 
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServerState.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServerState.java
index 1816ffc..77cb864 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServerState.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServerState.java
@@ -16,5 +16,6 @@ import org.apache.storm.shade.io.netty.util.AttributeKey;
 
 final class KerberosSaslNettyServerState {
 
-    public static final AttributeKey<KerberosSaslNettyServer> KERBOROS_SASL_NETTY_SERVER = AttributeKey.valueOf("kerboros.sasl.netty.server");
+    public static final AttributeKey<KerberosSaslNettyServer> KERBOROS_SASL_NETTY_SERVER =
+            AttributeKey.valueOf("kerboros.sasl.netty.server");
 }
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslServerHandler.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslServerHandler.java
index 0356538..c227448 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslServerHandler.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslServerHandler.java
@@ -30,14 +30,14 @@ public class KerberosSaslServerHandler extends ChannelInboundHandlerAdapter {
      * Used for client or server's token to send or receive from each other.
      */
     private final Map<String, Object> topoConf;
-    private final String jaas_section;
+    private final String jaasSection;
     private final List<String> authorizedUsers;
 
-    public KerberosSaslServerHandler(ISaslServer server, Map<String, Object> topoConf, String jaas_section,
+    public KerberosSaslServerHandler(ISaslServer server, Map<String, Object> topoConf, String jaasSection,
                                      List<String> authorizedUsers) throws IOException {
         this.server = server;
         this.topoConf = topoConf;
-        this.jaas_section = jaas_section;
+        this.jaasSection = jaasSection;
         this.authorizedUsers = authorizedUsers;
     }
 
@@ -60,7 +60,7 @@ public class KerberosSaslServerHandler extends ChannelInboundHandlerAdapter {
                 if (saslNettyServer == null) {
                     LOG.debug("No saslNettyServer for {}  yet; creating now, with topology token: ", channel);
                     try {
-                        saslNettyServer = new KerberosSaslNettyServer(topoConf, jaas_section, authorizedUsers);
+                        saslNettyServer = new KerberosSaslNettyServer(topoConf, jaasSection, authorizedUsers);
                         channel.attr(KerberosSaslNettyServerState.KERBOROS_SASL_NETTY_SERVER).set(saslNettyServer);
                     } catch (RuntimeException ioe) {
                         LOG.error("Error occurred while creating saslNettyServer on server {} for client {}",
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/Login.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/Login.java
index f9e42f0..6a50d84 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/Login.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/Login.java
@@ -40,19 +40,21 @@ public class Login {
     // and try to renew the ticket.
     private static final float TICKET_RENEW_WINDOW = 0.80f;
     /**
-     * Percentage of random jitter added to the renewal time
+     * Percentage of random jitter added to the renewal time.
      */
     private static final float TICKET_RENEW_JITTER = 0.05f;
     // Regardless of TICKET_RENEW_WINDOW setting above and the ticket expiry time,
     // thread will not sleep between refresh attempts any less than 1 minute (60*1000 milliseconds = 1 minute).
     // Change the '1' to e.g. 5, to change this to 5 minutes.
     private static final long MIN_TIME_BEFORE_RELOGIN = 1 * 60 * 1000L;
-    /** Random number generator */
+    /**
+     * Random number generator.
+     */
     private static Random rng = new Random();
     public CallbackHandler callbackHandler;
-    Logger LOG = Logger.getLogger(Login.class);
+    private static final Logger LOG = Logger.getLogger(Login.class);
     private Subject subject = null;
-    private Thread t = null;
+    private Thread thread = null;
     private boolean isKrbTicket = false;
     private boolean isUsingTicketCache = false;
     private boolean isUsingKeytab = false;
@@ -82,7 +84,7 @@ public class Login {
         this.loginContextName = loginContextName;
         subject = login.getSubject();
         isKrbTicket = !subject.getPrivateCredentials(KerberosTicket.class).isEmpty();
-        AppConfigurationEntry entries[] = Configuration.getConfiguration().getAppConfigurationEntry(loginContextName);
+        AppConfigurationEntry[] entries = Configuration.getConfiguration().getAppConfigurationEntry(loginContextName);
         for (AppConfigurationEntry entry : entries) {
             // there will only be a single entry, so this for() loop will only be iterated through once.
             if (entry.getOptions().get("useTicketCache") != null) {
@@ -110,7 +112,7 @@ public class Login {
         // TGT's existing expiry date and the configured MIN_TIME_BEFORE_RELOGIN. For testing and development,
         // you can decrease the interval of expiration of tickets (for example, to 3 minutes) by running :
         //  "modprinc -maxlife 3mins <principal>" in kadmin.
-        t = new Thread(new Runnable() {
+        thread = new Thread(new Runnable() {
             @Override
             public void run() {
                 LOG.info("TGT refresh thread started.");
@@ -128,21 +130,21 @@ public class Login {
                         long expiry = tgt.getEndTime().getTime();
                         Date expiryDate = new Date(expiry);
                         if ((isUsingTicketCache) && (tgt.getEndTime().equals(tgt.getRenewTill()))) {
-                            LOG.error("The TGT cannot be renewed beyond the next expiry date: " + expiryDate + "." +
-                                      "This process will not be able to authenticate new SASL connections after that " +
-                                      "time (for example, it will not be authenticate a new connection with a Zookeeper " +
-                                      "Quorum member).  Ask your system administrator to either increase the " +
-                                      "'renew until' time by doing : 'modprinc -maxrenewlife " + principal + "' within " +
-                                      "kadmin, or instead, to generate a keytab for " + principal + ". Because the TGT's " +
-                                      "expiry cannot be further extended by refreshing, exiting refresh thread now.");
+                            LOG.error("The TGT cannot be renewed beyond the next expiry date: " + expiryDate + "."
+                                    + "This process will not be able to authenticate new SASL connections after that "
+                                    + "time (for example, it will not be authenticate a new connection with a Zookeeper "
+                                    + "Quorum member).  Ask your system administrator to either increase the "
+                                    + "'renew until' time by doing : 'modprinc -maxrenewlife " + principal + "' within "
+                                    + "kadmin, or instead, to generate a keytab for " + principal + ". Because the TGT's "
+                                    + "expiry cannot be further extended by refreshing, exiting refresh thread now.");
                             return;
                         }
                         // determine how long to sleep from looking at ticket's expiry.
                         // We should not allow the ticket to expire, but we should take into consideration
                         // MIN_TIME_BEFORE_RELOGIN. Will not sleep less than MIN_TIME_BEFORE_RELOGIN, unless doing so
                         // would cause ticket expiration.
-                        if ((nextRefresh > expiry) ||
-                            ((now + MIN_TIME_BEFORE_RELOGIN) > expiry)) {
+                        if ((nextRefresh > expiry)
+                                || ((now + MIN_TIME_BEFORE_RELOGIN) > expiry)) {
                             // expiry is before next scheduled refresh).
                             nextRefresh = now;
                         } else {
@@ -160,8 +162,9 @@ public class Login {
                     if (tgt != null && now > tgt.getEndTime().getTime()) {
                         if ((now - tgt.getEndTime().getTime()) < (10 * MIN_TIME_BEFORE_RELOGIN)) {
                             Date until = new Date(now + MIN_TIME_BEFORE_RELOGIN);
-                            LOG.info("TGT already expired but giving additional 10 minutes past TGT expiry, refresh sleeping until: " +
-                                     until.toString());
+                            LOG.info("TGT already expired but giving additional 10 minutes past TGT expiry, refresh "
+                                    + "sleeping until: "
+                                    + until.toString());
                             try {
                                 Thread.sleep(MIN_TIME_BEFORE_RELOGIN);
                             } catch (InterruptedException ie) {
@@ -244,22 +247,22 @@ public class Login {
                 }
             }
         });
-        t.setName("Refresh-TGT");
-        t.setDaemon(true);
+        thread.setName("Refresh-TGT");
+        thread.setDaemon(true);
     }
 
     public void startThreadIfNeeded() {
-        // thread object 't' will be null if a refresh thread is not needed.
-        if (t != null) {
-            t.start();
+        // thread object 'thread' will be null if a refresh thread is not needed.
+        if (thread != null) {
+            thread.start();
         }
     }
 
     public void shutdown() {
-        if ((t != null) && (t.isAlive())) {
-            t.interrupt();
+        if ((thread != null) && (thread.isAlive())) {
+            thread.interrupt();
             try {
-                t.join();
+                thread.join();
             } catch (InterruptedException e) {
                 LOG.warn("error while waiting for Login thread to shutdown: " + e);
             }
@@ -276,11 +279,11 @@ public class Login {
 
     private synchronized LoginContext login(final String loginContextName) throws LoginException {
         if (loginContextName == null) {
-            throw new LoginException("loginContext name (JAAS file section header) was null. " +
-                                     "Please check your java.security.login.auth.config (=" +
-                                     System.getProperty("java.security.login.auth.config") +
-                                     ") and your " + ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY + "(=" +
-                                     System.getProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, "Client") + ")");
+            throw new LoginException("loginContext name (JAAS file section header) was null. "
+                    + "Please check your java.security.login.auth.config (="
+                    + System.getProperty("java.security.login.auth.config")
+                    + ") and your " + ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY + "(="
+                    + System.getProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, "Client") + ")");
         }
         LoginContext loginContext = new LoginContext(loginContextName, callbackHandler);
         loginContext.login();
@@ -294,8 +297,8 @@ public class Login {
         long expires = tgt.getEndTime().getTime();
         LOG.info("TGT valid starting at:        " + tgt.getStartTime().toString());
         LOG.info("TGT expires:                  " + tgt.getEndTime().toString());
-        long proposedRefresh = start + (long) ((expires - start) *
-                                               (TICKET_RENEW_WINDOW + (TICKET_RENEW_JITTER * rng.nextDouble())));
+        long proposedRefresh = start + (long) ((expires - start)
+                * (TICKET_RENEW_WINDOW + (TICKET_RENEW_JITTER * rng.nextDouble())));
         if (proposedRefresh > expires) {
             // proposedRefresh is too far in the future: it's after ticket expires: simply return now.
             return System.currentTimeMillis();
@@ -304,6 +307,7 @@ public class Login {
         }
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private synchronized KerberosTicket getTGT() {
         Set<KerberosTicket> tickets = subject.getPrivateCredentials(KerberosTicket.class);
         for (KerberosTicket ticket : tickets) {
@@ -319,9 +323,8 @@ public class Login {
     private void sleepUntilSufficientTimeElapsed() {
         long now = System.currentTimeMillis();
         if (now - getLastLogin() < MIN_TIME_BEFORE_RELOGIN) {
-            LOG.warn("Not attempting to re-login since the last re-login was " +
-                     "attempted less than " + (MIN_TIME_BEFORE_RELOGIN / 1000) + " seconds" +
-                     " before.");
+            LOG.warn("Not attempting to re-login since the last re-login was "
+                    + "attempted less than " + (MIN_TIME_BEFORE_RELOGIN / 1000) + " seconds before.");
             try {
                 Thread.sleep(MIN_TIME_BEFORE_RELOGIN - (now - getLastLogin()));
             } catch (InterruptedException e) {
@@ -334,7 +337,7 @@ public class Login {
     }
 
     /**
-     * Returns login object
+     * Returns login object.
      * @return login
      */
     private LoginContext getLogin() {
@@ -342,8 +345,7 @@ public class Login {
     }
 
     /**
-     * Set the login object
-     * @param login
+     * Set the login object.
      */
     private void setLogin(LoginContext login) {
         this.login = login;
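
The refresh-time arithmetic in getRefreshTime() is worth seeing on its own: the next
refresh is proposed at 80% of the ticket lifetime plus up to 5% random jitter, and is
clamped elsewhere by MIN_TIME_BEFORE_RELOGIN and the expiry itself. A standalone
restatement with the constants copied from the diff (the 10-hour lifetime is an
arbitrary example):

    import java.util.Random;

    // Hypothetical example class, not part of Storm.
    public class RefreshTimeDemo {
        private static final float TICKET_RENEW_WINDOW = 0.80f;
        private static final float TICKET_RENEW_JITTER = 0.05f;

        public static void main(String[] args) {
            Random rng = new Random();
            long start = System.currentTimeMillis();
            long expires = start + 10 * 60 * 60 * 1000L; // a hypothetical 10-hour TGT
            long proposedRefresh = start + (long) ((expires - start)
                    * (TICKET_RENEW_WINDOW + (TICKET_RENEW_JITTER * rng.nextDouble())));
            // for a 10-hour ticket this lands between 8.0 and 8.5 hours after start
            System.out.println((proposedRefresh - start) / 60000 + " minutes until refresh");
        }
    }
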
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/MessageBatch.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/MessageBatch.java
index e29d43c..6b15788 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/MessageBatch.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/MessageBatch.java
@@ -18,14 +18,14 @@ import org.apache.storm.shade.io.netty.buffer.ByteBuf;
 
 class MessageBatch implements INettySerializable {
 
-    private final int buffer_size;
+    private final int bufferSize;
     private final ArrayList<TaskMessage> msgs;
-    private int encoded_length;
+    private int encodedLength;
 
-    MessageBatch(int buffer_size) {
-        this.buffer_size = buffer_size;
+    MessageBatch(int bufferSize) {
+        this.bufferSize = bufferSize;
         msgs = new ArrayList<>();
-        encoded_length = ControlMessage.EOB_MESSAGE.encodeLength();
+        encodedLength = ControlMessage.EOB_MESSAGE.encodeLength();
     }
 
     void add(TaskMessage msg) {
@@ -34,7 +34,7 @@ class MessageBatch implements INettySerializable {
         }
 
         msgs.add(msg);
-        encoded_length += msgEncodeLength(msg);
+        encodedLength += msgEncodeLength(msg);
     }
 
     private int msgEncodeLength(TaskMessage taskMsg) {
@@ -50,13 +50,15 @@ class MessageBatch implements INettySerializable {
     }
 
     /**
+     * Check whether full.
      * @return true if this batch used up allowed buffer size
      */
     boolean isFull() {
-        return encoded_length >= buffer_size;
+        return encodedLength >= bufferSize;
     }
 
     /**
+     * Check whether empty.
      * @return true if this batch doesn't have any messages
      */
     boolean isEmpty() {
@@ -64,6 +66,7 @@ class MessageBatch implements INettySerializable {
     }
 
     /**
+     * Get size.
      * @return number of msgs in this batch
      */
     int size() {
@@ -72,11 +75,11 @@ class MessageBatch implements INettySerializable {
 
     @Override
     public int encodeLength() {
-        return encoded_length;
+        return encodedLength;
     }
     
     /**
-     * create a buffer containing the encoding of this batch
+     * create a buffer containing the encoding of this batch.
      */
     @Override
     public void write(ByteBuf dest) {
@@ -89,24 +92,24 @@ class MessageBatch implements INettySerializable {
     }
 
     /**
-     * write a TaskMessage into a buffer
+     * write a TaskMessage into a buffer.
      *
-     * Each TaskMessage is encoded as: task ... short(2) len ... int(4) payload ... byte[]     *
+     * <p>Each TaskMessage is encoded as: task ... short(2) len ... int(4) payload ... byte[]
      */
     private void writeTaskMessage(ByteBuf buf, TaskMessage message) {
-        int payload_len = 0;
+        int payloadLen = 0;
         if (message.message() != null) {
-            payload_len = message.message().length;
+            payloadLen = message.message().length;
         }
 
-        int task_id = message.task();
-        if (task_id > Short.MAX_VALUE) {
+        int taskId = message.task();
+        if (taskId > Short.MAX_VALUE) {
             throw new RuntimeException("Task ID should not exceed " + Short.MAX_VALUE);
         }
 
-        buf.writeShort((short) task_id);
-        buf.writeInt(payload_len);
-        if (payload_len > 0) {
+        buf.writeShort((short) taskId);
+        buf.writeInt(payloadLen);
+        if (payloadLen > 0) {
             buf.writeBytes(message.message());
         }
     }
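
writeTaskMessage() above documents the per-message layout: task id as short(2), payload
length as int(4), then the payload bytes, with an EOB control message terminating the
batch. A hedged mirror-image reader for a single message (illustration only; the real
decoding lives in MessageDecoder):

    import org.apache.storm.messaging.TaskMessage;
    import org.apache.storm.shade.io.netty.buffer.ByteBuf;

    // Hypothetical example class, not part of Storm.
    final class TaskMessageReader {
        static TaskMessage readTaskMessage(ByteBuf buf) {
            short taskId = buf.readShort(); // 2-byte task id
            int payloadLen = buf.readInt(); // 4-byte payload length
            byte[] payload = new byte[payloadLen];
            buf.readBytes(payload);
            return new TaskMessage(taskId, payload);
        }
    }
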
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/MessageDecoder.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/MessageDecoder.java
index fcd8f0e..bced87c 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/MessageDecoder.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/MessageDecoder.java
@@ -61,13 +61,13 @@ public class MessageDecoder extends ByteToMessageDecoder {
             available -= 2;
 
             // case 1: Control message
-            ControlMessage ctrl_msg = ControlMessage.mkMessage(code);
-            if (ctrl_msg != null) {
+            ControlMessage controlMessage = ControlMessage.mkMessage(code);
+            if (controlMessage != null) {
 
-                if (ctrl_msg == ControlMessage.EOB_MESSAGE) {
+                if (controlMessage == ControlMessage.EOB_MESSAGE) {
                     continue;
                 } else {
-                    out.add(ctrl_msg);
+                    out.add(controlMessage);
                     return;
                 }
             }
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslMessageToken.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslMessageToken.java
index 669c88a..368ffba 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslMessageToken.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslMessageToken.java
@@ -22,12 +22,7 @@ import org.slf4j.LoggerFactory;
  */
 public class SaslMessageToken implements INettySerializable {
     public static final short IDENTIFIER = -500;
-
-    /**
-     * Class logger
-     */
-    private static final Logger LOG = LoggerFactory
-        .getLogger(SaslMessageToken.class);
+    private static final Logger LOG = LoggerFactory.getLogger(SaslMessageToken.class);
 
     /**
      * Used for client or server's token to send or receive from each other.
@@ -50,23 +45,23 @@ public class SaslMessageToken implements INettySerializable {
     }
 
     public static SaslMessageToken read(byte[] serial) {
-        ByteBuf sm_buffer = Unpooled.wrappedBuffer(serial);
+        ByteBuf smBuffer = Unpooled.wrappedBuffer(serial);
         try {
-        short identifier = sm_buffer.readShort();
-        int payload_len = sm_buffer.readInt();
-        if (identifier != IDENTIFIER) {
-            return null;
-        }
-        byte token[] = new byte[payload_len];
-        sm_buffer.readBytes(token, 0, payload_len);
-        return new SaslMessageToken(token);
+            short identifier = smBuffer.readShort();
+            int payloadLen = smBuffer.readInt();
+            if (identifier != IDENTIFIER) {
+                return null;
+            }
+            byte[] token = new byte[payloadLen];
+            smBuffer.readBytes(token, 0, payloadLen);
+            return new SaslMessageToken(token);
         } finally {
-            sm_buffer.release();
+            smBuffer.release();
         }
     }
 
     /**
-     * Read accessor for SASL token
+     * Read accessor for SASL token.
      *
      * @return saslToken SASL token
      */
@@ -75,7 +70,7 @@ public class SaslMessageToken implements INettySerializable {
     }
 
     /**
-     * Write accessor for SASL token
+     * Write accessor for SASL token.
      *
      * @param token SASL token
      */
@@ -96,15 +91,15 @@ public class SaslMessageToken implements INettySerializable {
      */
     @Override
     public void write(ByteBuf dest) {
-        int payload_len = 0;
+        int payloadLen = 0;
         if (token != null) {
-            payload_len = token.length;
+            payloadLen = token.length;
         }
 
         dest.writeShort(IDENTIFIER);
-        dest.writeInt(payload_len);
+        dest.writeInt(payloadLen);
 
-        if (payload_len > 0) {
+        if (payloadLen > 0) {
             dest.writeBytes(token);
         }
     }
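
The framing cleaned up in read() above is: short identifier (-500), int payload length,
then the token bytes, and read() returns null when the identifier does not match. A
hedged round trip, assuming the byte[] constructor used by read() is public and that
the read accessor documented above is named getSaslToken():

    import java.nio.charset.StandardCharsets;
    import org.apache.storm.messaging.netty.SaslMessageToken;
    import org.apache.storm.shade.io.netty.buffer.ByteBuf;
    import org.apache.storm.shade.io.netty.buffer.Unpooled;

    // Hypothetical example class, not part of Storm.
    public class SaslTokenRoundTrip {
        public static void main(String[] args) {
            SaslMessageToken out =
                    new SaslMessageToken("challenge".getBytes(StandardCharsets.UTF_8));
            ByteBuf buf = Unpooled.buffer(out.encodeLength());
            out.write(buf); // short identifier, int length, token bytes
            byte[] serial = new byte[buf.readableBytes()];
            buf.readBytes(serial);
            buf.release();
            SaslMessageToken in = SaslMessageToken.read(serial);
            System.out.println(new String(in.getSaslToken(), StandardCharsets.UTF_8));
        }
    }
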
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyClient.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyClient.java
index 11f487d..fe4de08 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyClient.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyClient.java
@@ -85,11 +85,11 @@ public class SaslNettyClient {
      */
     private static class SaslClientCallbackHandler implements CallbackHandler {
         /**
-         * Generated username contained in TopologyToken
+         * Generated username contained in TopologyToken.
          */
         private final String userName;
         /**
-         * Generated password contained in TopologyToken
+         * Generated password contained in TopologyToken.
          */
         private final char[] userPassword;
 
@@ -106,7 +106,6 @@ public class SaslNettyClient {
          * Implementation used to respond to SASL tokens from server.
          *
          * @param callbacks objects that indicate what credential information the server's SaslServer requires from the client.
-         * @throws UnsupportedCallbackException
          */
         @Override
         public void handle(Callback[] callbacks)
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyServer.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyServer.java
index 06763e2..6c04d76 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyServer.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyServer.java
@@ -78,12 +78,12 @@ class SaslNettyServer {
     }
 
     /**
-     * CallbackHandler for SASL DIGEST-MD5 mechanism
+     * CallbackHandler for SASL DIGEST-MD5 mechanism.
      */
     public static class SaslDigestCallbackHandler implements CallbackHandler {
 
         /**
-         * Used to authenticate the clients
+         * Used to authenticate the clients.
          */
         private byte[] userPassword;
         private String userName;
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslStormClientHandler.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslStormClientHandler.java
index 25b0aa2..f1191a2 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslStormClientHandler.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslStormClientHandler.java
@@ -24,7 +24,7 @@ public class SaslStormClientHandler extends ChannelInboundHandlerAdapter {
 
     private static final Logger LOG = LoggerFactory
         .getLogger(SaslStormClientHandler.class);
-    private final long start_time;
+    private final long startTime;
     private final ISaslClient client;
     /**
      * Used for client or server's token to send or receive from each other.
@@ -34,7 +34,7 @@ public class SaslStormClientHandler extends ChannelInboundHandlerAdapter {
 
     public SaslStormClientHandler(ISaslClient client) throws IOException {
         this.client = client;
-        start_time = System.currentTimeMillis();
+        startTime = System.currentTimeMillis();
         getSASLCredentials();
     }
 
@@ -64,7 +64,7 @@ public class SaslStormClientHandler extends ChannelInboundHandlerAdapter {
     @Override
     public void channelRead(ChannelHandlerContext ctx, Object message) throws Exception {
         LOG.debug("send/recv time (ms): {}",
-                  (System.currentTimeMillis() - start_time));
+                  (System.currentTimeMillis() - startTime));
 
         // examine the response message from server
         if (message instanceof ControlMessage) {
@@ -145,6 +145,7 @@ public class SaslStormClientHandler extends ChannelInboundHandlerAdapter {
         channel.writeAndFlush(saslResponse, channel.voidPromise());
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private void getSASLCredentials() throws IOException {
         String secretKey;
         name = client.name();
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslStormServerHandler.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslStormServerHandler.java
index ce69a6f..181667b 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslStormServerHandler.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslStormServerHandler.java
@@ -128,6 +128,7 @@ public class SaslStormServerHandler extends ChannelInboundHandlerAdapter {
         ctx.close();
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private void getSASLCredentials() throws IOException {
         String secretKey;
         topologyName = server.name();
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/Server.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/Server.java
index a3cd8b0..8a37d1e 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/Server.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/Server.java
@@ -67,7 +67,7 @@ class Server extends ConnectionWithStatus implements IStatefulObject, ISaslServe
     private volatile boolean closing = false;
 
     /**
-     * Starts Netty at the given port
+     * Starts Netty at the given port.
      * @param topoConf The topology config
      * @param port The port to start Netty at
      * @param cb The callback to deliver incoming messages to
@@ -81,8 +81,7 @@ class Server extends ConnectionWithStatus implements IStatefulObject, ISaslServe
         this.newConnectionResponse = newConnectionResponse;
 
         // Configure the server.
-        int buffer_size = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE));
-        int backlog = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_SOCKET_BACKLOG), 500);
+        int bufferSize = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE));
         int maxWorkers = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_SERVER_WORKER_THREADS));
 
         ThreadFactory bossFactory = new NettyRenameThreadFactory(netty_name() + "-boss");
@@ -93,15 +92,16 @@ class Server extends ConnectionWithStatus implements IStatefulObject, ISaslServe
         // https://github.com/netty/netty/blob/netty-4.1.24.Final/transport/src/main/java/io/netty/channel/MultithreadEventLoopGroup.java#L40
         this.workerEventLoopGroup = new NioEventLoopGroup(maxWorkers > 0 ? maxWorkers : 0, workerFactory);
 
-        LOG.info("Create Netty Server " + netty_name() + ", buffer_size: " + buffer_size + ", maxWorkers: " + maxWorkers);
+        LOG.info("Create Netty Server " + netty_name() + ", buffer_size: " + bufferSize + ", maxWorkers: " + maxWorkers);
 
+        int backlog = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_SOCKET_BACKLOG), 500);
         bootstrap = new ServerBootstrap()
             .group(bossEventLoopGroup, workerEventLoopGroup)
             .channel(NioServerSocketChannel.class)
             .option(ChannelOption.SO_REUSEADDR, true)
             .option(ChannelOption.SO_BACKLOG, backlog)
             .childOption(ChannelOption.TCP_NODELAY, true)
-            .childOption(ChannelOption.SO_RCVBUF, buffer_size)
+            .childOption(ChannelOption.SO_RCVBUF, bufferSize)
             .childOption(ChannelOption.SO_KEEPALIVE, true)
             .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
             .childHandler(new StormServerPipelineFactory(topoConf, this));
@@ -136,9 +136,7 @@ class Server extends ConnectionWithStatus implements IStatefulObject, ISaslServe
     }
 
     /**
-     * enqueue a received message
-     *
-     * @throws InterruptedException
+     * enqueue a received message.
      */
     protected void enqueue(List<TaskMessage> msgs, String from) throws InterruptedException {
         if (null == msgs || msgs.isEmpty() || closing) {
@@ -154,7 +152,7 @@ class Server extends ConnectionWithStatus implements IStatefulObject, ISaslServe
     }
 
     /**
-     * close all channels, and release resources
+     * close all channels, and release resources.
      */
     @Override
     public void close() {
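
The constructor above reads three knobs out of the topology conf. For reference, the
same keys set from the submitting side (a hedged snippet; the values are illustrative,
not recommendations):

    import org.apache.storm.Config;

    // Hypothetical example class, not part of Storm.
    public class NettyServerConfExample {
        public static void main(String[] args) {
            Config conf = new Config();
            conf.put(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE, 5242880);     // SO_RCVBUF, bytes
            conf.put(Config.STORM_MESSAGING_NETTY_SOCKET_BACKLOG, 500);      // SO_BACKLOG; 500 if unset
            conf.put(Config.STORM_MESSAGING_NETTY_SERVER_WORKER_THREADS, 1); // worker event loop size
            System.out.println(conf);
        }
    }
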
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/StormClientHandler.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/StormClientHandler.java
index 90d03d6..a3837a8 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/StormClientHandler.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/StormClientHandler.java
@@ -27,13 +27,13 @@ import org.slf4j.LoggerFactory;
 public class StormClientHandler extends ChannelInboundHandlerAdapter {
     private static final Logger LOG = LoggerFactory.getLogger(StormClientHandler.class);
     private final Client client;
-    private final KryoValuesDeserializer _des;
+    private final KryoValuesDeserializer des;
     private final AtomicBoolean[] remoteBpStatus;
 
     StormClientHandler(Client client, AtomicBoolean[] remoteBpStatus, Map<String, Object> conf) {
         this.client = client;
         this.remoteBpStatus = remoteBpStatus;
-        _des = new KryoValuesDeserializer(conf);
+        des = new KryoValuesDeserializer(conf);
     }
 
     @Override
@@ -80,7 +80,7 @@ public class StormClientHandler extends ChannelInboundHandlerAdapter {
             if (tm.task() != Server.LOAD_METRICS_TASK_ID) {
                 throw new RuntimeException("Metrics messages are sent to the system task (" + client.getDstAddress() + ") " + tm);
             }
-            List<Object> metrics = _des.deserialize(tm.message());
+            List<Object> metrics = des.deserialize(tm.message());
             if (metrics.size() < 1) {
                 throw new RuntimeException("No metrics data in the metrics message (" + client.getDstAddress() + ") " + metrics);
             }
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/StormServerHandler.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/StormServerHandler.java
index 3c256bb..542dd9c 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/StormServerHandler.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/StormServerHandler.java
@@ -28,11 +28,11 @@ public class StormServerHandler extends ChannelInboundHandlerAdapter {
     private static final Logger LOG = LoggerFactory.getLogger(StormServerHandler.class);
     private static final Set<Class<?>> ALLOWED_EXCEPTIONS = new HashSet<>(Arrays.asList(new Class<?>[]{ IOException.class }));
     private final IServer server;
-    private final AtomicInteger failure_count;
+    private final AtomicInteger failureCount;
 
     public StormServerHandler(IServer server) {
         this.server = server;
-        failure_count = new AtomicInteger(0);
+        failureCount = new AtomicInteger(0);
     }
 
     @Override
@@ -51,7 +51,7 @@ public class StormServerHandler extends ChannelInboundHandlerAdapter {
             server.received(msg, channel.remoteAddress().toString(), channel);
         } catch (InterruptedException e) {
             LOG.info("failed to enqueue a request message", e);
-            failure_count.incrementAndGet();
+            failureCount.incrementAndGet();
         }
     }
 
diff --git a/storm-client/src/jvm/org/apache/storm/metric/EventLoggerBolt.java b/storm-client/src/jvm/org/apache/storm/metric/EventLoggerBolt.java
index a66fe47..9776848 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/EventLoggerBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/EventLoggerBolt.java
@@ -12,6 +12,10 @@
 
 package org.apache.storm.metric;
 
+import static org.apache.storm.daemon.StormCommon.TOPOLOGY_EVENT_LOGGER_ARGUMENTS;
+import static org.apache.storm.daemon.StormCommon.TOPOLOGY_EVENT_LOGGER_CLASS;
+import static org.apache.storm.metric.IEventLogger.EventInfo;
+
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -23,10 +27,6 @@ import org.apache.storm.tuple.Tuple;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.storm.daemon.StormCommon.TOPOLOGY_EVENT_LOGGER_ARGUMENTS;
-import static org.apache.storm.daemon.StormCommon.TOPOLOGY_EVENT_LOGGER_CLASS;
-import static org.apache.storm.metric.IEventLogger.EventInfo;
-
 public class EventLoggerBolt implements IBolt {
 
     /*
diff --git a/storm-client/src/jvm/org/apache/storm/metric/IEventLogger.java b/storm-client/src/jvm/org/apache/storm/metric/IEventLogger.java
index 76b9b41..7a8af67 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/IEventLogger.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/IEventLogger.java
@@ -73,7 +73,7 @@ public interface IEventLogger {
         }
 
         /**
-         * Returns a default formatted string with fields separated by ","
+         * Returns a default formatted string with fields separated by ",".
          *
          * @return a default formatted string with fields separated by ","
          */
diff --git a/storm-client/src/jvm/org/apache/storm/metric/LoggingMetricsConsumer.java b/storm-client/src/jvm/org/apache/storm/metric/LoggingMetricsConsumer.java
index 2e722b2..769163e 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/LoggingMetricsConsumer.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/LoggingMetricsConsumer.java
@@ -23,17 +23,17 @@ import org.slf4j.LoggerFactory;
 /**
 * Listens for all metrics and dumps them to the log.
  *
- * To use, add this to your topology's configuration:
+ * <p>To use, add this to your topology's configuration:
  *
- * ```java conf.registerMetricsConsumer(org.apache.storm.metrics.LoggingMetricsConsumer.class, 1); ```
+ * <p>```java conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class, 1); ```
  *
- * Or edit the storm.yaml config file:
+ * <p>Or edit the storm.yaml config file:
  *
- * ```yaml topology.metrics.consumer.register: - class: "org.apache.storm.metrics.LoggingMetricsConsumer" parallelism.hint: 1 ```
+ * <p>```yaml topology.metrics.consumer.register: - class: "org.apache.storm.metric.LoggingMetricsConsumer" parallelism.hint: 1 ```
  */
 public class LoggingMetricsConsumer implements IMetricsConsumer {
     public static final Logger LOG = LoggerFactory.getLogger(LoggingMetricsConsumer.class);
-    static private String padding = "                       ";
+    private static String padding = "                       ";
 
     @Override
     public void prepare(Map<String, Object> topoConf, Object registrationArgument, TopologyContext context, IErrorReporter errorReporter) {
@@ -51,8 +51,10 @@ public class LoggingMetricsConsumer implements IMetricsConsumer {
         for (DataPoint p : dataPoints) {
             sb.delete(header.length(), sb.length());
             sb.append(p.name)
-              .append(padding).delete(header.length() + 23, sb.length()).append("\t")
-              .append(p.value);
+                    .append(padding)
+                    .delete(header.length() + 23, sb.length())
+                    .append("\t")
+                    .append(p.value);
             LOG.info(sb.toString());
         }
     }
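
Spelled out, the registration one-liner quoted in the javadoc above looks like this from topology-building code; the surrounding class is illustrative:

```java
import org.apache.storm.Config;
import org.apache.storm.metric.LoggingMetricsConsumer;

public class RegisterLoggingConsumerSketch {
    public static Config buildConf() {
        Config conf = new Config();
        // One consumer task, matching the parallelism hint in the javadoc example.
        conf.registerMetricsConsumer(LoggingMetricsConsumer.class, 1);
        return conf;
    }
}
```

The storm.yaml form quoted in the same javadoc achieves the equivalent registration cluster-wide instead of per topology.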
diff --git a/storm-client/src/jvm/org/apache/storm/metric/MetricsConsumerBolt.java b/storm-client/src/jvm/org/apache/storm/metric/MetricsConsumerBolt.java
index 46bac22..e76868a 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/MetricsConsumerBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/MetricsConsumerBolt.java
@@ -32,72 +32,75 @@ import org.slf4j.LoggerFactory;
 
 public class MetricsConsumerBolt implements IBolt {
     public static final Logger LOG = LoggerFactory.getLogger(MetricsConsumerBolt.class);
-    private final int _maxRetainMetricTuples;
-    private final Predicate<IMetricsConsumer.DataPoint> _filterPredicate;
-    private final DataPointExpander _expander;
-    private final BlockingQueue<MetricsTask> _taskQueue;
-    IMetricsConsumer _metricsConsumer;
-    String _consumerClassName;
-    OutputCollector _collector;
-    Object _registrationArgument;
-    private Thread _taskExecuteThread;
-    private volatile boolean _running = true;
+    private final int maxRetainMetricTuples;
+    private final Predicate<IMetricsConsumer.DataPoint> filterPredicate;
+    private final DataPointExpander expander;
+    private final BlockingQueue<MetricsTask> taskQueue;
+    IMetricsConsumer metricsConsumer;
+    String consumerClassName;
+    OutputCollector collector;
+    Object registrationArgument;
+    private Thread taskExecuteThread;
+    private volatile boolean running = true;
 
     public MetricsConsumerBolt(String consumerClassName, Object registrationArgument, int maxRetainMetricTuples,
                                Predicate<IMetricsConsumer.DataPoint> filterPredicate, DataPointExpander expander) {
 
-        _consumerClassName = consumerClassName;
-        _registrationArgument = registrationArgument;
-        _maxRetainMetricTuples = maxRetainMetricTuples;
-        _filterPredicate = filterPredicate;
-        _expander = expander;
+        this.consumerClassName = consumerClassName;
+        this.registrationArgument = registrationArgument;
+        this.maxRetainMetricTuples = maxRetainMetricTuples;
+        this.filterPredicate = filterPredicate;
+        this.expander = expander;
 
-        if (_maxRetainMetricTuples > 0) {
-            _taskQueue = new LinkedBlockingDeque<>(_maxRetainMetricTuples);
+        if (this.maxRetainMetricTuples > 0) {
+            taskQueue = new LinkedBlockingDeque<>(this.maxRetainMetricTuples);
         } else {
-            _taskQueue = new LinkedBlockingDeque<>();
+            taskQueue = new LinkedBlockingDeque<>();
         }
     }
 
     @Override
     public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
         try {
-            _metricsConsumer = (IMetricsConsumer) Class.forName(_consumerClassName).newInstance();
+            metricsConsumer = (IMetricsConsumer) Class.forName(consumerClassName).newInstance();
         } catch (Exception e) {
-            throw new RuntimeException("Could not instantiate a class listed in config under section " +
-                                       Config.TOPOLOGY_METRICS_CONSUMER_REGISTER + " with fully qualified name " + _consumerClassName, e);
+            throw new RuntimeException("Could not instantiate a class listed in config under section "
+                            + Config.TOPOLOGY_METRICS_CONSUMER_REGISTER
+                            + " with fully qualified name "
+                            + consumerClassName,
+                    e);
         }
-        _metricsConsumer.prepare(topoConf, _registrationArgument, context, collector);
-        _collector = collector;
-        _taskExecuteThread = new Thread(new MetricsHandlerRunnable());
-        _taskExecuteThread.setDaemon(true);
-        _taskExecuteThread.start();
+        metricsConsumer.prepare(topoConf, registrationArgument, context, collector);
+        this.collector = collector;
+        taskExecuteThread = new Thread(new MetricsHandlerRunnable());
+        taskExecuteThread.setDaemon(true);
+        taskExecuteThread.start();
     }
 
     @Override
     public void execute(Tuple input) {
         IMetricsConsumer.TaskInfo taskInfo = (IMetricsConsumer.TaskInfo) input.getValue(0);
         Collection<IMetricsConsumer.DataPoint> dataPoints = (Collection) input.getValue(1);
-        Collection<IMetricsConsumer.DataPoint> expandedDataPoints = _expander.expandDataPoints(dataPoints);
+        Collection<IMetricsConsumer.DataPoint> expandedDataPoints = expander.expandDataPoints(dataPoints);
         List<IMetricsConsumer.DataPoint> filteredDataPoints = getFilteredDataPoints(expandedDataPoints);
         MetricsTask metricsTask = new MetricsTask(taskInfo, filteredDataPoints);
 
-        while (!_taskQueue.offer(metricsTask)) {
-            _taskQueue.poll();
+        while (!taskQueue.offer(metricsTask)) {
+            taskQueue.poll();
         }
 
-        _collector.ack(input);
+        collector.ack(input);
     }
 
     private List<IMetricsConsumer.DataPoint> getFilteredDataPoints(Collection<IMetricsConsumer.DataPoint> dataPoints) {
-        return Lists.newArrayList(Iterables.filter(dataPoints, _filterPredicate));
+        return Lists.newArrayList(Iterables.filter(dataPoints, filterPredicate));
     }
 
     @Override
     public void cleanup() {
-        _running = false;
-        _metricsConsumer.cleanup();
-        _taskExecuteThread.interrupt();
+        running = false;
+        metricsConsumer.cleanup();
+        taskExecuteThread.interrupt();
     }
 
     static class MetricsTask {
@@ -122,10 +125,10 @@ public class MetricsConsumerBolt implements IBolt {
 
         @Override
         public void run() {
-            while (_running) {
+            while (running) {
                 try {
-                    MetricsTask task = _taskQueue.take();
-                    _metricsConsumer.handleDataPoints(task.getTaskInfo(), task.getDataPoints());
+                    MetricsTask task = taskQueue.take();
+                    metricsConsumer.handleDataPoints(task.getTaskInfo(), task.getDataPoints());
                 } catch (InterruptedException e) {
                     break;
                 } catch (Throwable t) {
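
The `execute` method above implements a drop-oldest policy on the bounded task queue: when `offer` fails because the queue is full, the oldest entry is polled away and the offer retried. The same pattern in isolation (names here are illustrative):

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;

public class DropOldestSketch {
    public static void main(String[] args) {
        BlockingQueue<String> taskQueue = new LinkedBlockingDeque<>(2);
        for (String task : new String[]{ "a", "b", "c" }) {
            // Same loop shape as MetricsConsumerBolt.execute: evict the oldest
            // entry until the new one fits.
            while (!taskQueue.offer(task)) {
                taskQueue.poll();
            }
        }
        System.out.println(taskQueue); // prints [b, c]; "a" was dropped
    }
}
```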
diff --git a/storm-client/src/jvm/org/apache/storm/metric/SystemBolt.java b/storm-client/src/jvm/org/apache/storm/metric/SystemBolt.java
index a168230..5e978a7 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/SystemBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/SystemBolt.java
@@ -30,26 +30,25 @@ import org.apache.storm.tuple.Tuple;
 import org.apache.storm.utils.ObjectReader;
 import org.apache.storm.utils.ReflectionUtils;
 
-
 // There is one task inside one executor for each worker of the topology.
 // TaskID is always -1, therefore you can only send-unanchored tuples to co-located SystemBolt.
 // This bolt was conceived to export worker stats via metrics api.
 public class SystemBolt implements IBolt {
-    private static boolean _prepareWasCalled = false;
+    private static boolean prepareWasCalled = false;
 
     @SuppressWarnings({ "unchecked" })
     @Override
     public void prepare(final Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
-        if (_prepareWasCalled && !"local".equals(topoConf.get(Config.STORM_CLUSTER_MODE))) {
+        if (prepareWasCalled && !"local".equals(topoConf.get(Config.STORM_CLUSTER_MODE))) {
             throw new RuntimeException("A single worker should have 1 SystemBolt instance.");
         }
-        _prepareWasCalled = true;
+        prepareWasCalled = true;
 
         int bucketSize = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS));
 
-        final RuntimeMXBean jvmRT = ManagementFactory.getRuntimeMXBean();
-        context.registerMetric("uptimeSecs", () -> jvmRT.getUptime() / 1000.0, bucketSize);
-        context.registerMetric("startTimeSecs", () -> jvmRT.getStartTime() / 1000.0, bucketSize);
+        final RuntimeMXBean jvmRt = ManagementFactory.getRuntimeMXBean();
+        context.registerMetric("uptimeSecs", () -> jvmRt.getUptime() / 1000.0, bucketSize);
+        context.registerMetric("startTimeSecs", () -> jvmRt.getStartTime() / 1000.0, bucketSize);
 
         final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
         context.registerMetric("threadCount", threadBean::getThreadCount, bucketSize);
@@ -68,6 +67,7 @@ public class SystemBolt implements IBolt {
             }
         }, bucketSize);
 
+        @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
         final MemoryMXBean jvmMemRT = ManagementFactory.getMemoryMXBean();
 
         context.registerMetric("memory/heap", new MemoryUsageMetric(jvmMemRT::getHeapMemoryUsage), bucketSize);
@@ -104,15 +104,15 @@ public class SystemBolt implements IBolt {
     }
 
     private static class MemoryUsageMetric implements IMetric {
-        Supplier<MemoryUsage> _getUsage;
+        Supplier<MemoryUsage> getUsage;
 
         public MemoryUsageMetric(Supplier<MemoryUsage> getUsage) {
-            _getUsage = getUsage;
+            this.getUsage = getUsage;
         }
 
         @Override
         public Object getValueAndReset() {
-            MemoryUsage memUsage = _getUsage.get();
+            MemoryUsage memUsage = getUsage.get();
             HashMap<String, Object> m = new HashMap<>();
             m.put("maxBytes", memUsage.getMax());
             m.put("committedBytes", memUsage.getCommitted());
@@ -127,28 +127,28 @@ public class SystemBolt implements IBolt {
     // canonically the metrics data exported is time bucketed when doing counts.
     // convert the absolute values here into time buckets.
     private static class GarbageCollectorMetric implements IMetric {
-        GarbageCollectorMXBean _gcBean;
-        Long _collectionCount;
-        Long _collectionTime;
+        GarbageCollectorMXBean gcBean;
+        Long collectionCount;
+        Long collectionTime;
 
         public GarbageCollectorMetric(GarbageCollectorMXBean gcBean) {
-            _gcBean = gcBean;
+            this.gcBean = gcBean;
         }
 
         @Override
         public Object getValueAndReset() {
-            Long collectionCountP = _gcBean.getCollectionCount();
-            Long collectionTimeP = _gcBean.getCollectionTime();
+            Long collectionCountP = gcBean.getCollectionCount();
+            Long collectionTimeP = gcBean.getCollectionTime();
 
             Map<String, Object> ret = null;
-            if (_collectionCount != null && _collectionTime != null) {
+            if (collectionCount != null && collectionTime != null) {
                 ret = new HashMap<>();
-                ret.put("count", collectionCountP - _collectionCount);
-                ret.put("timeMs", collectionTimeP - _collectionTime);
+                ret.put("count", collectionCountP - collectionCount);
+                ret.put("timeMs", collectionTimeP - collectionTime);
             }
 
-            _collectionCount = collectionCountP;
-            _collectionTime = collectionTimeP;
+            collectionCount = collectionCountP;
+            collectionTime = collectionTimeP;
             return ret;
         }
     }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/AssignableMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/AssignableMetric.java
index 8e980ad..8bcc40b 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/AssignableMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/AssignableMetric.java
@@ -13,18 +13,18 @@
 package org.apache.storm.metric.api;
 
 public class AssignableMetric implements IMetric {
-    Object _value;
+    Object value;
 
     public AssignableMetric(Object value) {
-        _value = value;
+        this.value = value;
     }
 
     public void setValue(Object value) {
-        _value = value;
+        this.value = value;
     }
 
     @Override
     public Object getValueAndReset() {
-        return _value;
+        return value;
     }
 }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/CombinedMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/CombinedMetric.java
index 944533f..9b84a98 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/CombinedMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/CombinedMetric.java
@@ -13,22 +13,22 @@
 package org.apache.storm.metric.api;
 
 public class CombinedMetric implements IMetric {
-    private final ICombiner _combiner;
-    private Object _value;
+    private final ICombiner combiner;
+    private Object value;
 
     public CombinedMetric(ICombiner combiner) {
-        _combiner = combiner;
-        _value = _combiner.identity();
+        this.combiner = combiner;
+        value = this.combiner.identity();
     }
 
     public void update(Object value) {
-        _value = _combiner.combine(_value, value);
+        this.value = combiner.combine(this.value, value);
     }
 
     @Override
     public Object getValueAndReset() {
-        Object ret = _value;
-        _value = _combiner.identity();
+        Object ret = value;
+        value = combiner.identity();
         return ret;
     }
 }
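
`CombinedMetric` delegates entirely to an `ICombiner`, which, as the calls above show, supplies an identity element and a binary combine step. A sketch of a max-tracking combiner, written against the raw interface exactly as `CombinedMetric` uses it (the method shapes are inferred from the calls above):

```java
import org.apache.storm.metric.api.CombinedMetric;
import org.apache.storm.metric.api.ICombiner;

public class MaxCombinerSketch {
    public static void main(String[] args) {
        ICombiner max = new ICombiner() {
            @Override
            public Object identity() {
                return Long.MIN_VALUE; // neutral element for max
            }

            @Override
            public Object combine(Object a, Object b) {
                return Math.max((Long) a, (Long) b);
            }
        };
        CombinedMetric metric = new CombinedMetric(max);
        metric.update(3L);
        metric.update(7L);
        System.out.println(metric.getValueAndReset()); // 7; the value resets to identity
    }
}
```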
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/CountMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/CountMetric.java
index 8284ed9..36df8f9 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/CountMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/CountMetric.java
@@ -13,23 +13,23 @@
 package org.apache.storm.metric.api;
 
 public class CountMetric implements IMetric {
-    long _value = 0;
+    long value = 0;
 
     public CountMetric() {
     }
 
     public void incr() {
-        _value++;
+        value++;
     }
 
     public void incrBy(long incrementBy) {
-        _value += incrementBy;
+        value += incrementBy;
     }
 
     @Override
     public Object getValueAndReset() {
-        long ret = _value;
-        _value = 0;
+        long ret = value;
+        value = 0;
         return ret;
     }
 }
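
Metrics like `CountMetric` come to life when registered from a component's `prepare` via `TopologyContext.registerMetric`, the same call the `SystemBolt` hunk above uses. A minimal sketch of a bolt counting executed tuples; the metric name and the 60-second bucket size are illustrative:

```java
import java.util.Map;
import org.apache.storm.metric.api.CountMetric;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

public class CountingBoltSketch extends BaseRichBolt {
    private transient CountMetric executed;
    private OutputCollector collector;

    @Override
    public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        // Publish and reset the count every 60 seconds (illustrative bucket size).
        executed = context.registerMetric("executed", new CountMetric(), 60);
    }

    @Override
    public void execute(Tuple input) {
        executed.incr();
        collector.ack(input);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // no output streams
    }
}
```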
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java
index 4c45f0d..c6f86f4 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java
@@ -17,6 +17,7 @@ package org.apache.storm.metric.api;
  */
 public interface IMetric {
     /**
+     * Get value and reset.
     * @return an object that will be sent to {@link IMetricsConsumer#handleDataPoints(org.apache.storm.metric.api.IMetricsConsumer
      * .TaskInfo,
      *     java.util.Collection)}. If null is returned nothing will be sent. If this value can be reset, like with a counter, a side effect
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java b/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java
index 1558354..a332dc4 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java
@@ -48,8 +48,10 @@ public interface IMetricsConsumer {
 
         @Override
         public String toString() {
-            return "TASK_INFO: { host: " + srcWorkerHost + ":" + srcWorkerPort +
-                   " comp: " + srcComponentId + "[" + srcTaskId + "]}";
+            return "TASK_INFO: { host: " + srcWorkerHost
+                    + ":" + srcWorkerPort
+                    + " comp: " + srcComponentId
+                    + "[" + srcTaskId + "]}";
         }
     }
 
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/MeanReducer.java b/storm-client/src/jvm/org/apache/storm/metric/api/MeanReducer.java
index e78ddb8..d60015b 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/MeanReducer.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/MeanReducer.java
@@ -18,11 +18,6 @@
 
 package org.apache.storm.metric.api;
 
-class MeanReducerState {
-    public int count = 0;
-    public double sum = 0.0;
-}
-
 public class MeanReducer implements IReducer<MeanReducerState> {
     @Override
     public MeanReducerState init() {
diff --git a/storm-client/src/jvm/org/apache/storm/utils/WrappedHBExecutionException.java b/storm-client/src/jvm/org/apache/storm/metric/api/MeanReducerState.java
similarity index 66%
copy from storm-client/src/jvm/org/apache/storm/utils/WrappedHBExecutionException.java
copy to storm-client/src/jvm/org/apache/storm/metric/api/MeanReducerState.java
index ab976c0..451bbb0 100644
--- a/storm-client/src/jvm/org/apache/storm/utils/WrappedHBExecutionException.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/MeanReducerState.java
@@ -16,20 +16,9 @@
  * limitations under the License.
  */
 
-package org.apache.storm.utils;
+package org.apache.storm.metric.api;
 
-import org.apache.storm.generated.HBExecutionException;
-
-/**
- * Wraps the generated TException to allow getMessage() to return a valid string.
- */
-public class WrappedHBExecutionException extends HBExecutionException {
-    public WrappedHBExecutionException(String msg) {
-        super(msg);
-    }
-
-    @Override
-    public String getMessage() {
-        return this.get_msg();
-    }
+class MeanReducerState {
+    public int count = 0;
+    public double sum = 0.0;
 }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/MultiCountMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/MultiCountMetric.java
index cc2dce2..d133665 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/MultiCountMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/MultiCountMetric.java
@@ -16,15 +16,15 @@ import java.util.HashMap;
 import java.util.Map;
 
 public class MultiCountMetric implements IMetric {
-    Map<String, CountMetric> _value = new HashMap<>();
+    Map<String, CountMetric> value = new HashMap<>();
 
     public MultiCountMetric() {
     }
 
     public CountMetric scope(String key) {
-        CountMetric val = _value.get(key);
+        CountMetric val = value.get(key);
         if (val == null) {
-            _value.put(key, val = new CountMetric());
+            value.put(key, val = new CountMetric());
         }
         return val;
     }
@@ -32,7 +32,7 @@ public class MultiCountMetric implements IMetric {
     @Override
     public Map<String, Object> getValueAndReset() {
         Map<String, Object> ret = new HashMap<>();
-        for (Map.Entry<String, CountMetric> e : _value.entrySet()) {
+        for (Map.Entry<String, CountMetric> e : value.entrySet()) {
             ret.put(e.getKey(), e.getValue().getValueAndReset());
         }
         return ret;
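
`scope` lazily creates one `CountMetric` per key, so related counts can live under a single registered metric. For example:

```java
import org.apache.storm.metric.api.MultiCountMetric;

public class MultiCountSketch {
    public static void main(String[] args) {
        MultiCountMetric counts = new MultiCountMetric();
        counts.scope("acked").incr();
        counts.scope("acked").incrBy(2);
        counts.scope("failed").incr();
        // acked=3, failed=1 (map iteration order is not guaranteed);
        // every per-key CountMetric is reset by the call.
        System.out.println(counts.getValueAndReset());
    }
}
```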
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/MultiReducedMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/MultiReducedMetric.java
index 079b320..c9c8590 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/MultiReducedMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/MultiReducedMetric.java
@@ -16,17 +16,17 @@ import java.util.HashMap;
 import java.util.Map;
 
 public class MultiReducedMetric implements IMetric {
-    Map<String, ReducedMetric> _value = new HashMap<>();
-    IReducer _reducer;
+    Map<String, ReducedMetric> value = new HashMap<>();
+    IReducer reducer;
 
     public MultiReducedMetric(IReducer reducer) {
-        _reducer = reducer;
+        this.reducer = reducer;
     }
 
     public ReducedMetric scope(String key) {
-        ReducedMetric val = _value.get(key);
+        ReducedMetric val = value.get(key);
         if (val == null) {
-            _value.put(key, val = new ReducedMetric(_reducer));
+            value.put(key, val = new ReducedMetric(reducer));
         }
         return val;
     }
@@ -34,7 +34,7 @@ public class MultiReducedMetric implements IMetric {
     @Override
     public Map<String, Object> getValueAndReset() {
         Map<String, Object> ret = new HashMap<>();
-        for (Map.Entry<String, ReducedMetric> e : _value.entrySet()) {
+        for (Map.Entry<String, ReducedMetric> e : value.entrySet()) {
             Object val = e.getValue().getValueAndReset();
             if (val != null) {
                 ret.put(e.getKey(), val);
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/ReducedMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/ReducedMetric.java
index 92ee6ff..718d34f 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/ReducedMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/ReducedMetric.java
@@ -13,22 +13,22 @@
 package org.apache.storm.metric.api;
 
 public class ReducedMetric implements IMetric {
-    private final IReducer _reducer;
-    private Object _accumulator;
+    private final IReducer reducer;
+    private Object accumulator;
 
     public ReducedMetric(IReducer reducer) {
-        _reducer = reducer;
-        _accumulator = _reducer.init();
+        this.reducer = reducer;
+        accumulator = this.reducer.init();
     }
 
     public void update(Object value) {
-        _accumulator = _reducer.reduce(_accumulator, value);
+        accumulator = reducer.reduce(accumulator, value);
     }
 
     @Override
     public Object getValueAndReset() {
-        Object ret = _reducer.extractResult(_accumulator);
-        _accumulator = _reducer.init();
+        Object ret = reducer.extractResult(accumulator);
+        accumulator = reducer.init();
         return ret;
     }
 }
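
`ReducedMetric` threads an accumulator through an `IReducer`; paired with the `MeanReducer` whose state class was moved into its own file above, it yields a running average. A sketch, assuming `MeanReducer` accepts boxed numeric inputs:

```java
import org.apache.storm.metric.api.MeanReducer;
import org.apache.storm.metric.api.ReducedMetric;

public class MeanSketch {
    public static void main(String[] args) {
        ReducedMetric avgLatency = new ReducedMetric(new MeanReducer());
        avgLatency.update(10); // assumption: boxed integers are accepted
        avgLatency.update(20);
        // extractResult produces the mean, then the accumulator is re-initialized.
        System.out.println(avgLatency.getValueAndReset()); // 15.0
    }
}
```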
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/StateMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/StateMetric.java
index 1234bda..cde20b4 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/StateMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/StateMetric.java
@@ -13,14 +13,14 @@
 package org.apache.storm.metric.api;
 
 public class StateMetric implements IMetric {
-    private IStatefulObject _obj;
+    private IStatefulObject obj;
 
     public StateMetric(IStatefulObject obj) {
-        _obj = obj;
+        this.obj = obj;
     }
 
     @Override
     public Object getValueAndReset() {
-        return _obj.getState();
+        return obj.getState();
     }
 }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/rpc/CountShellMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/rpc/CountShellMetric.java
index abc2074..dab9d69 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/rpc/CountShellMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/rpc/CountShellMetric.java
@@ -15,12 +15,15 @@ package org.apache.storm.metric.api.rpc;
 import org.apache.storm.metric.api.CountMetric;
 
 public class CountShellMetric extends CountMetric implements IShellMetric {
-    /***
+    /**
+     * Update metric from RPC.
+     *
      * @param value should be null or long
-     *  if value is null, it will call incr()
-     *  if value is long, it will call incrBy((long)params)
-     * */
+     *     if value is null, it will call incr()
+     *     if value is long, it will call incrBy((long)value)
+     */
     @Override
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public void updateMetricFromRPC(Object value) {
         if (value == null) {
             incr();
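
The javadoc above fully determines the dispatch: a `null` payload maps to `incr()`, a numeric one to `incrBy`. From the Java side the call looks like this (the literal payloads stand in for what a shell component would send over multilang RPC):

```java
import org.apache.storm.metric.api.rpc.CountShellMetric;

public class CountShellMetricSketch {
    public static void main(String[] args) {
        CountShellMetric metric = new CountShellMetric();
        metric.updateMetricFromRPC(null); // incr(): count is now 1
        metric.updateMetricFromRPC(4L);   // incrBy(4): count is now 5
        System.out.println(metric.getValueAndReset()); // 5
    }
}
```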
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/rpc/IShellMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/rpc/IShellMetric.java
index 8de34ba..a66e178 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/rpc/IShellMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/rpc/IShellMetric.java
@@ -15,12 +15,13 @@ package org.apache.storm.metric.api.rpc;
 import org.apache.storm.metric.api.IMetric;
 
 public interface IShellMetric extends IMetric {
-    /***
-     * @function
-     *     This interface is used by ShellBolt and ShellSpout through RPC call to update Metric 
+    /**
+     * This interface is used by ShellBolt and ShellSpout through an RPC call to update a metric.
+     *
      * @param
     *     value used to update the metric; its meaning changes according to the implementation.
     *     Object can be any JSON-supported type: String, Long, Double, Boolean, Null, List, Map.
-     * */
+     */
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public void updateMetricFromRPC(Object value);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupCpu.java b/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupCpu.java
index af925eb..eef6546 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupCpu.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupCpu.java
@@ -23,7 +23,7 @@ import org.apache.storm.container.cgroup.core.CpuacctCore;
 import org.apache.storm.container.cgroup.core.CpuacctCore.StatType;
 
 /**
- * Report CPU used in the cgroup
+ * Report CPU used in the cgroup.
  */
 public class CGroupCpu extends CGroupMetricsBase<Map<String, Long>> {
     long previousSystem = 0;
@@ -34,6 +34,7 @@ public class CGroupCpu extends CGroupMetricsBase<Map<String, Long>> {
         super(conf, SubSystemType.cpuacct);
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public synchronized int getUserHZ() throws IOException {
         if (userHz < 0) {
             ProcessBuilder pb = new ProcessBuilder("getconf", "CLK_TCK");
@@ -52,6 +53,7 @@ public class CGroupCpu extends CGroupMetricsBase<Map<String, Long>> {
         long systemHz = stat.get(StatType.system);
         long userHz = stat.get(StatType.user);
         long user = userHz - previousUser;
+        @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
         long sys = systemHz - previousSystem;
         previousUser = userHz;
         previousSystem = systemHz;
diff --git a/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupMemoryLimit.java b/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupMemoryLimit.java
index adfebe6..d6d5750 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupMemoryLimit.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupMemoryLimit.java
@@ -20,12 +20,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Reports the current memory limit of the cgroup for this worker
+ * Reports the current memory limit of the cgroup for this worker.
  */
 public class CGroupMemoryLimit extends CGroupMetricsBase<Long> {
     private static final Logger LOG = LoggerFactory.getLogger(CGroupMemoryLimit.class);
     private static final long BYTES_PER_MB = 1024 * 1024;
-    private final long _workerLimitBytes;
+    private final long workerLimitBytes;
 
     public CGroupMemoryLimit(Map<String, Object> conf) {
         super(conf, SubSystemType.memory);
@@ -36,13 +36,13 @@ public class CGroupMemoryLimit extends CGroupMetricsBase<Long> {
         } catch (NumberFormatException e) {
             LOG.warn("Error Parsing worker.memory_limit_mb {}", e);
         }
-        _workerLimitBytes = BYTES_PER_MB * limit;
+        workerLimitBytes = BYTES_PER_MB * limit;
     }
 
     @Override
     public Long getDataFrom(CgroupCore core) throws Exception {
-        if (_workerLimitBytes > 0) {
-            return _workerLimitBytes;
+        if (workerLimitBytes > 0) {
+            return workerLimitBytes;
         }
         return ((MemoryCore) core).getPhysicalUsageLimit();
     }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupMemoryUsage.java b/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupMemoryUsage.java
index bf33805..e30feae 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupMemoryUsage.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/cgroup/CGroupMemoryUsage.java
@@ -18,7 +18,7 @@ import org.apache.storm.container.cgroup.core.CgroupCore;
 import org.apache.storm.container.cgroup.core.MemoryCore;
 
 /**
- * Reports the current memory usage of the cgroup for this worker
+ * Reports the current memory usage of the cgroup for this worker.
  */
 public class CGroupMemoryUsage extends CGroupMetricsBase<Long> {
 
diff --git a/storm-client/src/jvm/org/apache/storm/metric/internal/CountStatAndMetric.java b/storm-client/src/jvm/org/apache/storm/metric/internal/CountStatAndMetric.java
index d8112db..31c51fd 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/internal/CountStatAndMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/internal/CountStatAndMetric.java
@@ -22,29 +22,31 @@ import org.apache.storm.metric.api.IMetric;
  * Acts as a Count Metric, but also keeps track of approximate counts for the last 10 mins, 3 hours, 1 day, and all time.
  */
 public class CountStatAndMetric implements IMetric {
-    private final AtomicLong _currentBucket;
+    private final AtomicLong currentBucket;
     //10 min values
-    private final int _tmSize;
-    private final long[] _tmBuckets;
-    private final long[] _tmTime;
+    private final int tmSize;
+    private final long[] tmBuckets;
+    private final long[] tmTime;
     //3 hour values
-    private final int _thSize;
-    private final long[] _thBuckets;
-    private final long[] _thTime;
+    private final int thSize;
+    private final long[] thBuckets;
+    private final long[] thTime;
     //1 day values
-    private final int _odSize;
-    private final long[] _odBuckets;
-    private final long[] _odTime;
-    private final TimerTask _task;
+    private final int odSize;
+    private final long[] odBuckets;
+    private final long[] odTime;
+    private final TimerTask task;
     // All internal state except for the count of the current bucket is
     // protected using a lock on this counter
-    private long _bucketStart;
+    private long bucketStart;
     //exact variable time, that is added to the current bucket
-    private long _exactExtra;
+    private long exactExtra;
     //all time
-    private long _allTime;
+    private long allTime;
 
     /**
+     * Constructor.
+     *
      * @param numBuckets the number of buckets to divide the time periods into.
      */
     public CountStatAndMetric(int numBuckets) {
@@ -52,7 +54,7 @@ public class CountStatAndMetric implements IMetric {
     }
 
     /**
-     * Constructor
+     * Constructor.
      *
      * @param numBuckets the number of buckets to divide the time periods into.
      * @param startTime  if positive the simulated time to start the from.
@@ -61,28 +63,28 @@ public class CountStatAndMetric implements IMetric {
         numBuckets = Math.max(numBuckets, 2);
         //We want to capture the full time range, so the target size is as
         // if we had one bucket less than we do
-        _tmSize = 10 * 60 * 1000 / (numBuckets - 1);
-        _thSize = 3 * 60 * 60 * 1000 / (numBuckets - 1);
-        _odSize = 24 * 60 * 60 * 1000 / (numBuckets - 1);
-        if (_tmSize < 1 || _thSize < 1 || _odSize < 1) {
+        tmSize = 10 * 60 * 1000 / (numBuckets - 1);
+        thSize = 3 * 60 * 60 * 1000 / (numBuckets - 1);
+        odSize = 24 * 60 * 60 * 1000 / (numBuckets - 1);
+        if (tmSize < 1 || thSize < 1 || odSize < 1) {
             throw new IllegalArgumentException("number of buckets is too large to be supported");
         }
-        _tmBuckets = new long[numBuckets];
-        _tmTime = new long[numBuckets];
-        _thBuckets = new long[numBuckets];
-        _thTime = new long[numBuckets];
-        _odBuckets = new long[numBuckets];
-        _odTime = new long[numBuckets];
-        _allTime = 0;
-        _exactExtra = 0;
-
-        _bucketStart = startTime >= 0 ? startTime : System.currentTimeMillis();
-        _currentBucket = new AtomicLong(0);
+        tmBuckets = new long[numBuckets];
+        tmTime = new long[numBuckets];
+        thBuckets = new long[numBuckets];
+        thTime = new long[numBuckets];
+        odBuckets = new long[numBuckets];
+        odTime = new long[numBuckets];
+        allTime = 0;
+        exactExtra = 0;
+
+        bucketStart = startTime >= 0 ? startTime : System.currentTimeMillis();
+        currentBucket = new AtomicLong(0);
         if (startTime < 0) {
-            _task = new Fresher();
-            MetricStatTimer._timer.scheduleAtFixedRate(_task, _tmSize, _tmSize);
+            task = new Fresher();
+            MetricStatTimer.timer.scheduleAtFixedRate(task, tmSize, tmSize);
         } else {
-            _task = null;
+            task = null;
         }
     }
 
@@ -92,7 +94,7 @@ public class CountStatAndMetric implements IMetric {
      * @param count number to count
      */
     public void incBy(long count) {
-        _currentBucket.addAndGet(count);
+        currentBucket.addAndGet(count);
     }
 
 
@@ -101,29 +103,30 @@ public class CountStatAndMetric implements IMetric {
         return getValueAndReset(System.currentTimeMillis());
     }
 
+    @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
     synchronized Object getValueAndReset(long now) {
-        long value = _currentBucket.getAndSet(0);
-        long timeSpent = now - _bucketStart;
-        long ret = value + _exactExtra;
-        _bucketStart = now;
-        _exactExtra = 0;
+        long value = currentBucket.getAndSet(0);
+        long timeSpent = now - bucketStart;
+        long ret = value + exactExtra;
+        bucketStart = now;
+        exactExtra = 0;
         rotateBuckets(value, timeSpent);
         return ret;
     }
 
     synchronized void rotateSched(long now) {
-        long value = _currentBucket.getAndSet(0);
-        long timeSpent = now - _bucketStart;
-        _exactExtra += value;
-        _bucketStart = now;
+        long value = currentBucket.getAndSet(0);
+        long timeSpent = now - bucketStart;
+        exactExtra += value;
+        bucketStart = now;
         rotateBuckets(value, timeSpent);
     }
 
     synchronized void rotateBuckets(long value, long timeSpent) {
-        rotate(value, timeSpent, _tmSize, _tmTime, _tmBuckets);
-        rotate(value, timeSpent, _thSize, _thTime, _thBuckets);
-        rotate(value, timeSpent, _odSize, _odTime, _odBuckets);
-        _allTime += value;
+        rotate(value, timeSpent, tmSize, tmTime, tmBuckets);
+        rotate(value, timeSpent, thSize, thTime, thBuckets);
+        rotate(value, timeSpent, odSize, odTime, odBuckets);
+        allTime += value;
     }
 
     private synchronized void rotate(long value, long timeSpent, long targetSize, long[] times, long[] buckets) {
@@ -146,6 +149,7 @@ public class CountStatAndMetric implements IMetric {
     }
 
     /**
+     * Get time counts.
      * @return a map of time window to count. Keys are "600" for the last 10 mins, "10800" for the last 3 hours, "86400" for the last day,
      *     and ":all-time" for all time
      */
@@ -155,12 +159,12 @@ public class CountStatAndMetric implements IMetric {
 
     synchronized Map<String, Long> getTimeCounts(long now) {
         Map<String, Long> ret = new HashMap<>();
-        long value = _currentBucket.get();
-        long timeSpent = now - _bucketStart;
-        ret.put("600", readApproximateTime(value, timeSpent, _tmTime, _tmBuckets, 600 * 1000));
-        ret.put("10800", readApproximateTime(value, timeSpent, _thTime, _thBuckets, 10800 * 1000));
-        ret.put("86400", readApproximateTime(value, timeSpent, _odTime, _odBuckets, 86400 * 1000));
-        ret.put(":all-time", value + _allTime);
+        long value = currentBucket.get();
+        long timeSpent = now - bucketStart;
+        ret.put("600", readApproximateTime(value, timeSpent, tmTime, tmBuckets, 600 * 1000));
+        ret.put("10800", readApproximateTime(value, timeSpent, thTime, thBuckets, 10800 * 1000));
+        ret.put("86400", readApproximateTime(value, timeSpent, odTime, odBuckets, 86400 * 1000));
+        ret.put(":all-time", value + allTime);
         return ret;
     }
 
@@ -181,8 +185,8 @@ public class CountStatAndMetric implements IMetric {
     }
 
     public void close() {
-        if (_task != null) {
-            _task.cancel();
+        if (task != null) {
+            task.cancel();
         }
     }
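
For orientation, the windowed counter above is used like this: counts go in through `incBy`, and `getTimeCounts()` reads them back keyed by window length in seconds. A sketch; the bucket count of 20 is illustrative:

```java
import java.util.Map;
import org.apache.storm.metric.internal.CountStatAndMetric;

public class WindowedCountSketch {
    public static void main(String[] args) {
        CountStatAndMetric counts = new CountStatAndMetric(20);
        counts.incBy(5);
        Map<String, Long> windows = counts.getTimeCounts();
        // Keys per the javadoc: "600" (10 min), "10800" (3 h), "86400" (1 day), ":all-time".
        System.out.println(windows.get("600") + " / " + windows.get(":all-time"));
        counts.close(); // cancels the background rotation task
    }
}
```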
 
diff --git a/storm-client/src/jvm/org/apache/storm/metric/internal/LatencyStatAndMetric.java b/storm-client/src/jvm/org/apache/storm/metric/internal/LatencyStatAndMetric.java
index c66f2a1..ddaadb7 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/internal/LatencyStatAndMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/internal/LatencyStatAndMetric.java
@@ -31,37 +31,39 @@ public class LatencyStatAndMetric implements IMetric {
     //The current lat and count buckets are protected by a different lock
     // from the other buckets.  This is to reduce the lock contention
     // When doing complex calculations.  Never grab the instance object lock
-    // while holding _currentLock to avoid deadlocks
-    private final Object _currentLock = new byte[0];
+    // while holding currentLock to avoid deadlocks
+    private final Object currentLock = new byte[0];
     //10 min values
-    private final int _tmSize;
-    private final long[] _tmLatBuckets;
-    private final long[] _tmCountBuckets;
-    private final long[] _tmTime;
+    private final int tmSize;
+    private final long[] tmLatBuckets;
+    private final long[] tmCountBuckets;
+    private final long[] tmTime;
     //3 hour values
-    private final int _thSize;
-    private final long[] _thLatBuckets;
-    private final long[] _thCountBuckets;
-    private final long[] _thTime;
+    private final int thSize;
+    private final long[] thLatBuckets;
+    private final long[] thCountBuckets;
+    private final long[] thTime;
     //1 day values
-    private final int _odSize;
-    private final long[] _odLatBuckets;
-    private final long[] _odCountBuckets;
-    private final long[] _odTime;
-    private final TimerTask _task;
-    private long _currentLatBucket;
-    private long _currentCountBucket;
+    private final int odSize;
+    private final long[] odLatBuckets;
+    private final long[] odCountBuckets;
+    private final long[] odTime;
+    private final TimerTask task;
+    private long currentLatBucket;
+    private long currentCountBucket;
     // All internal state except for the current buckets is
     // protected using the Object Lock
-    private long _bucketStart;
+    private long bucketStart;
     //exact variable time, that is added to the current bucket
-    private long _exactExtraLat;
-    private long _exactExtraCount;
+    private long exactExtraLat;
+    private long exactExtraCount;
     //all time
-    private long _allTimeLat;
-    private long _allTimeCount;
+    private long allTimeLat;
+    private long allTimeCount;
 
     /**
+     * Constructor.
+     *
      * @param numBuckets the number of buckets to divide the time periods into.
      */
     public LatencyStatAndMetric(int numBuckets) {
@@ -69,7 +71,7 @@ public class LatencyStatAndMetric implements IMetric {
     }
 
     /**
-     * Constructor
+     * Constructor.
      *
      * @param numBuckets the number of buckets to divide the time periods into.
      * @param startTime  if positive the simulated time to start the from.
@@ -78,46 +80,46 @@ public class LatencyStatAndMetric implements IMetric {
         numBuckets = Math.max(numBuckets, 2);
         //We want to capture the full time range, so the target size is as
         // if we had one bucket less than we do
-        _tmSize = 10 * 60 * 1000 / (numBuckets - 1);
-        _thSize = 3 * 60 * 60 * 1000 / (numBuckets - 1);
-        _odSize = 24 * 60 * 60 * 1000 / (numBuckets - 1);
-        if (_tmSize < 1 || _thSize < 1 || _odSize < 1) {
+        tmSize = 10 * 60 * 1000 / (numBuckets - 1);
+        thSize = 3 * 60 * 60 * 1000 / (numBuckets - 1);
+        odSize = 24 * 60 * 60 * 1000 / (numBuckets - 1);
+        if (tmSize < 1 || thSize < 1 || odSize < 1) {
             throw new IllegalArgumentException("number of buckets is too large to be supported");
         }
-        _tmLatBuckets = new long[numBuckets];
-        _tmCountBuckets = new long[numBuckets];
-        _tmTime = new long[numBuckets];
-        _thLatBuckets = new long[numBuckets];
-        _thCountBuckets = new long[numBuckets];
-        _thTime = new long[numBuckets];
-        _odLatBuckets = new long[numBuckets];
-        _odCountBuckets = new long[numBuckets];
-        _odTime = new long[numBuckets];
-        _allTimeLat = 0;
-        _allTimeCount = 0;
-        _exactExtraLat = 0;
-        _exactExtraCount = 0;
-
-        _bucketStart = startTime >= 0 ? startTime : System.currentTimeMillis();
-        _currentLatBucket = 0;
-        _currentCountBucket = 0;
+        tmLatBuckets = new long[numBuckets];
+        tmCountBuckets = new long[numBuckets];
+        tmTime = new long[numBuckets];
+        thLatBuckets = new long[numBuckets];
+        thCountBuckets = new long[numBuckets];
+        thTime = new long[numBuckets];
+        odLatBuckets = new long[numBuckets];
+        odCountBuckets = new long[numBuckets];
+        odTime = new long[numBuckets];
+        allTimeLat = 0;
+        allTimeCount = 0;
+        exactExtraLat = 0;
+        exactExtraCount = 0;
+
+        bucketStart = startTime >= 0 ? startTime : System.currentTimeMillis();
+        currentLatBucket = 0;
+        currentCountBucket = 0;
         if (startTime < 0) {
-            _task = new Fresher();
-            MetricStatTimer._timer.scheduleAtFixedRate(_task, _tmSize, _tmSize);
+            task = new Fresher();
+            MetricStatTimer.timer.scheduleAtFixedRate(task, tmSize, tmSize);
         } else {
-            _task = null;
+            task = null;
         }
     }
 
     /**
-     * Record a specific latency
+     * Record a specific latency.
      *
      * @param latency what we are recording
      */
     public void record(long latency) {
-        synchronized (_currentLock) {
-            _currentLatBucket += latency;
-            _currentCountBucket++;
+        synchronized (currentLock) {
+            currentLatBucket += latency;
+            currentCountBucket++;
         }
     }
 
@@ -129,20 +131,22 @@ public class LatencyStatAndMetric implements IMetric {
     synchronized Object getValueAndReset(long now) {
         long lat;
         long count;
-        synchronized (_currentLock) {
-            lat = _currentLatBucket;
-            count = _currentCountBucket;
-            _currentLatBucket = 0;
-            _currentCountBucket = 0;
+        synchronized (currentLock) {
+            lat = currentLatBucket;
+            count = currentCountBucket;
+            currentLatBucket = 0;
+            currentCountBucket = 0;
         }
 
-        long timeSpent = now - _bucketStart;
-        long exactExtraCountSum = count + _exactExtraCount;
+        @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
+        long timeSpent = now - bucketStart;
+        long exactExtraCountSum = count + exactExtraCount;
+        @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
         double ret = Utils.zeroIfNaNOrInf(
-            ((double) (lat + _exactExtraLat)) / exactExtraCountSum);
-        _bucketStart = now;
-        _exactExtraLat = 0;
-        _exactExtraCount = 0;
+            ((double) (lat + exactExtraLat)) / exactExtraCountSum);
+        bucketStart = now;
+        exactExtraLat = 0;
+        exactExtraCount = 0;
         rotateBuckets(lat, count, timeSpent);
         return ret;
     }
@@ -150,26 +154,26 @@ public class LatencyStatAndMetric implements IMetric {
     synchronized void rotateSched(long now) {
         long lat;
         long count;
-        synchronized (_currentLock) {
-            lat = _currentLatBucket;
-            count = _currentCountBucket;
-            _currentLatBucket = 0;
-            _currentCountBucket = 0;
+        synchronized (currentLock) {
+            lat = currentLatBucket;
+            count = currentCountBucket;
+            currentLatBucket = 0;
+            currentCountBucket = 0;
         }
 
-        long timeSpent = now - _bucketStart;
-        _exactExtraLat += lat;
-        _exactExtraCount += count;
-        _bucketStart = now;
+        exactExtraLat += lat;
+        exactExtraCount += count;
+        long timeSpent = now - bucketStart;
+        bucketStart = now;
         rotateBuckets(lat, count, timeSpent);
     }
 
     synchronized void rotateBuckets(long lat, long count, long timeSpent) {
-        rotate(lat, count, timeSpent, _tmSize, _tmTime, _tmLatBuckets, _tmCountBuckets);
-        rotate(lat, count, timeSpent, _thSize, _thTime, _thLatBuckets, _thCountBuckets);
-        rotate(lat, count, timeSpent, _odSize, _odTime, _odLatBuckets, _odCountBuckets);
-        _allTimeLat += lat;
-        _allTimeCount += count;
+        rotate(lat, count, timeSpent, tmSize, tmTime, tmLatBuckets, tmCountBuckets);
+        rotate(lat, count, timeSpent, thSize, thTime, thLatBuckets, thCountBuckets);
+        rotate(lat, count, timeSpent, odSize, odTime, odLatBuckets, odCountBuckets);
+        allTimeLat += lat;
+        allTimeCount += count;
     }
 
     private synchronized void rotate(long lat, long count, long timeSpent, long targetSize,
@@ -199,6 +203,7 @@ public class LatencyStatAndMetric implements IMetric {
     }
 
     /**
+     * Get time latency average.
      * @return a map of time window to average latency. Keys are "600" for the last 10 mins, "10800" for the last 3 hours, "86400" for
      *     the last day, and ":all-time" for all time
      */
@@ -210,17 +215,17 @@ public class LatencyStatAndMetric implements IMetric {
         Map<String, Double> ret = new HashMap<>();
         long lat;
         long count;
-        synchronized (_currentLock) {
-            lat = _currentLatBucket;
-            count = _currentCountBucket;
+        synchronized (currentLock) {
+            lat = currentLatBucket;
+            count = currentCountBucket;
         }
-        long timeSpent = now - _bucketStart;
-        ret.put("600", readApproximateLatAvg(lat, count, timeSpent, _tmTime, _tmLatBuckets, _tmCountBuckets, 600 * 1000));
-        ret.put("10800", readApproximateLatAvg(lat, count, timeSpent, _thTime, _thLatBuckets, _thCountBuckets, 10800 * 1000));
-        ret.put("86400", readApproximateLatAvg(lat, count, timeSpent, _odTime, _odLatBuckets, _odCountBuckets, 86400 * 1000));
-        long allTimeCountSum = count + _allTimeCount;
+        long timeSpent = now - bucketStart;
+        ret.put("600", readApproximateLatAvg(lat, count, timeSpent, tmTime, tmLatBuckets, tmCountBuckets, 600 * 1000));
+        ret.put("10800", readApproximateLatAvg(lat, count, timeSpent, thTime, thLatBuckets, thCountBuckets, 10800 * 1000));
+        ret.put("86400", readApproximateLatAvg(lat, count, timeSpent, odTime, odLatBuckets, odCountBuckets, 86400 * 1000));
+        long allTimeCountSum = count + allTimeCount;
         ret.put(":all-time", Utils.zeroIfNaNOrInf(
-            (double) lat + _allTimeLat) / allTimeCountSum);
+            ((double) (lat + allTimeLat)) / allTimeCountSum));
         return ret;
     }
 
@@ -239,8 +244,8 @@ public class LatencyStatAndMetric implements IMetric {
     }
 
     public void close() {
-        if (_task != null) {
-            _task.cancel();
+        if (task != null) {
+            task.cancel();
         }
     }
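
The latency variant records (latency, count) pairs and averages them on read; `getValueAndReset()` is the form the metrics system polls. A sketch along the same lines:

```java
import org.apache.storm.metric.internal.LatencyStatAndMetric;

public class LatencyAvgSketch {
    public static void main(String[] args) {
        LatencyStatAndMetric latency = new LatencyStatAndMetric(20);
        latency.record(10);
        latency.record(30);
        // Average of everything recorded since the last reset: (10 + 30) / 2.
        System.out.println(latency.getValueAndReset()); // 20.0
        latency.close();
    }
}
```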
 
diff --git a/storm-client/src/jvm/org/apache/storm/metric/internal/MetricStatTimer.java b/storm-client/src/jvm/org/apache/storm/metric/internal/MetricStatTimer.java
index 4541a25..41b87a5 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/internal/MetricStatTimer.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/internal/MetricStatTimer.java
@@ -15,8 +15,8 @@ package org.apache.storm.metric.internal;
 import java.util.Timer;
 
 /**
- * Just holds a singleton metric/stat timer for use by metric/stat calculations
+ * Just holds a singleton metric/stat timer for use by metric/stat calculations.
  */
 class MetricStatTimer {
-    static Timer _timer = new Timer("metric/stat timer", true);
+    static Timer timer = new Timer("metric/stat timer", true);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/internal/MultiCountStatAndMetric.java b/storm-client/src/jvm/org/apache/storm/metric/internal/MultiCountStatAndMetric.java
index 1d44863..8e0b093 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/internal/MultiCountStatAndMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/internal/MultiCountStatAndMetric.java
@@ -22,24 +22,26 @@ import org.apache.storm.metric.api.IMetric;
 * Acts as a MultiCount Metric, but keeps track of approximate counts for the last 10 mins, 3 hours, 1 day, and all time for the same keys.
  */
 public class MultiCountStatAndMetric<T> implements IMetric {
-    private final int _numBuckets;
-    private ConcurrentHashMap<T, CountStatAndMetric> _counts = new ConcurrentHashMap<>();
+    private final int numBuckets;
+    private ConcurrentHashMap<T, CountStatAndMetric> counts = new ConcurrentHashMap<>();
 
     /**
+     * Constructor.
+     *
      * @param numBuckets the number of buckets to divide the time periods into.
      */
     public MultiCountStatAndMetric(int numBuckets) {
-        _numBuckets = numBuckets;
+        this.numBuckets = numBuckets;
     }
 
     CountStatAndMetric get(T key) {
-        CountStatAndMetric c = _counts.get(key);
+        CountStatAndMetric c = counts.get(key);
         if (c == null) {
             synchronized (this) {
-                c = _counts.get(key);
+                c = counts.get(key);
                 if (c == null) {
-                    c = new CountStatAndMetric(_numBuckets);
-                    _counts.put(key, c);
+                    c = new CountStatAndMetric(numBuckets);
+                    counts.put(key, c);
                 }
             }
         }
@@ -68,12 +70,12 @@ public class MultiCountStatAndMetric<T> implements IMetric {
     @Override
     public Object getValueAndReset() {
         Map<String, Long> ret = new HashMap<String, Long>();
-        for (Map.Entry<T, CountStatAndMetric> entry : _counts.entrySet()) {
+        for (Map.Entry<T, CountStatAndMetric> entry : counts.entrySet()) {
             String key = keyToString(entry.getKey());
             // There could be collisions if keyToString returns only part of a result.
             Long val = (Long) entry.getValue().getValueAndReset();
             Long other = ret.get(key);
-            val += other == null ? 0l : other;
+            val += other == null ? 0L : other;
             ret.put(key, val);
         }
         return ret;
@@ -81,7 +83,7 @@ public class MultiCountStatAndMetric<T> implements IMetric {
 
     public Map<String, Map<T, Long>> getTimeCounts() {
         Map<String, Map<T, Long>> ret = new HashMap<>();
-        for (Map.Entry<T, CountStatAndMetric> entry : _counts.entrySet()) {
+        for (Map.Entry<T, CountStatAndMetric> entry : counts.entrySet()) {
             T key = entry.getKey();
             Map<String, Long> toFlip = entry.getValue().getTimeCounts();
             for (Map.Entry<String, Long> subEntry : toFlip.entrySet()) {
@@ -98,7 +100,7 @@ public class MultiCountStatAndMetric<T> implements IMetric {
     }
 
     public void close() {
-        for (CountStatAndMetric cc : _counts.values()) {
+        for (CountStatAndMetric cc : counts.values()) {
             cc.close();
         }
     }
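
The get(...) method above is the classic double-checked get-or-create over a
ConcurrentHashMap. Since Java 8 the same semantics can be expressed atomically
with computeIfAbsent; a hypothetical alternative, not part of this patch (note
that the mapping function may run while the map bin is locked):

    CountStatAndMetric get(T key) {
        return counts.computeIfAbsent(key, k -> new CountStatAndMetric(numBuckets));
    }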
diff --git a/storm-client/src/jvm/org/apache/storm/metric/internal/MultiLatencyStatAndMetric.java b/storm-client/src/jvm/org/apache/storm/metric/internal/MultiLatencyStatAndMetric.java
index e94bc62..eae373c 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/internal/MultiLatencyStatAndMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/internal/MultiLatencyStatAndMetric.java
@@ -23,24 +23,26 @@ import org.apache.storm.metric.api.IMetric;
  * the same keys.
  */
 public class MultiLatencyStatAndMetric<T> implements IMetric {
-    private final int _numBuckets;
-    private ConcurrentHashMap<T, LatencyStatAndMetric> _lat = new ConcurrentHashMap<>();
+    private final int numBuckets;
+    private ConcurrentHashMap<T, LatencyStatAndMetric> lat = new ConcurrentHashMap<>();
 
     /**
+     * Constructor.
+     *
      * @param numBuckets the number of buckets to divide the time periods into.
      */
     public MultiLatencyStatAndMetric(int numBuckets) {
-        _numBuckets = numBuckets;
+        this.numBuckets = numBuckets;
     }
 
     LatencyStatAndMetric get(T key) {
-        LatencyStatAndMetric c = _lat.get(key);
+        LatencyStatAndMetric c = lat.get(key);
         if (c == null) {
             synchronized (this) {
-                c = _lat.get(key);
+                c = lat.get(key);
                 if (c == null) {
-                    c = new LatencyStatAndMetric(_numBuckets);
-                    _lat.put(key, c);
+                    c = new LatencyStatAndMetric(numBuckets);
+                    lat.put(key, c);
                 }
             }
         }
@@ -48,7 +50,7 @@ public class MultiLatencyStatAndMetric<T> implements IMetric {
     }
 
     /**
-     * Record a latency value
+     * Record a latency value.
      *
      * @param latency the measurement to record
      */
@@ -69,7 +71,7 @@ public class MultiLatencyStatAndMetric<T> implements IMetric {
     @Override
     public Object getValueAndReset() {
         Map<String, Double> ret = new HashMap<String, Double>();
-        for (Map.Entry<T, LatencyStatAndMetric> entry : _lat.entrySet()) {
+        for (Map.Entry<T, LatencyStatAndMetric> entry : lat.entrySet()) {
             String key = keyToString(entry.getKey());
             Double val = (Double) entry.getValue().getValueAndReset();
             ret.put(key, val);
@@ -79,7 +81,7 @@ public class MultiLatencyStatAndMetric<T> implements IMetric {
 
     public Map<String, Map<T, Double>> getTimeLatAvg() {
         Map<String, Map<T, Double>> ret = new HashMap<>();
-        for (Map.Entry<T, LatencyStatAndMetric> entry : _lat.entrySet()) {
+        for (Map.Entry<T, LatencyStatAndMetric> entry : lat.entrySet()) {
             T key = entry.getKey();
             Map<String, Double> toFlip = entry.getValue().getTimeLatAvg();
             for (Map.Entry<String, Double> subEntry : toFlip.entrySet()) {
@@ -96,7 +98,7 @@ public class MultiLatencyStatAndMetric<T> implements IMetric {
     }
 
     public void close() {
-        for (LatencyStatAndMetric l : _lat.values()) {
+        for (LatencyStatAndMetric l : lat.values()) {
             l.close();
         }
     }
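
A quick usage sketch, assuming the record(key, latency) signature implied by
the Javadoc above (the stream names and values are made up):

    // One metric object tracking latency separately per stream name.
    MultiLatencyStatAndMetric<String> lat = new MultiLatencyStatAndMetric<>(20);
    lat.record("default", 12);  // 12 ms observed on stream "default"
    lat.record("ack", 3);
    @SuppressWarnings("unchecked")
    Map<String, Double> averages = (Map<String, Double>) lat.getValueAndReset();
    lat.close();                // release the per-key timer tasks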
diff --git a/storm-client/src/jvm/org/apache/storm/metric/internal/RateTracker.java b/storm-client/src/jvm/org/apache/storm/metric/internal/RateTracker.java
index d0bbc74..94f3433 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/internal/RateTracker.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/internal/RateTracker.java
@@ -20,18 +20,20 @@ import java.util.concurrent.atomic.AtomicLong;
  * This class is a utility to track the rate of something.
  */
 public class RateTracker implements Closeable {
-    private final int _bucketSizeMillis;
+    private final int bucketSizeMillis;
     // Old buckets and their lengths are only touched when rotating or gathering the metrics, which should not be that frequent.
     // As such, all access to them should be protected by synchronizing with the RateTracker instance.
-    private final long[] _bucketTime;
-    private final long[] _oldBuckets;
+    private final long[] bucketTime;
+    private final long[] oldBuckets;
 
-    private final AtomicLong _bucketStart;
-    private final AtomicLong _currentBucket;
+    private final AtomicLong bucketStart;
+    private final AtomicLong currentBucket;
 
-    private final TimerTask _task;
+    private final TimerTask task;
 
     /**
+     * Constructor.
+     *
      * @param validTimeWindowInMils events that happened before validTimeWindowInMils are not considered when reporting the rate.
      * @param numBuckets            the number of time slides to divide validTimeWindows. The more buckets, the smoother the reported results
      *                              will be.
@@ -41,7 +43,7 @@ public class RateTracker implements Closeable {
     }
 
     /**
-     * Constructor
+     * Constructor.
      *
      * @param validTimeWindowInMils events that happened before validTimeWindowInMils are not considered when reporting the rate.
      * @param numBuckets            the number of time slides to divide validTimeWindows. The more buckets, the smoother the reported results
@@ -50,34 +52,35 @@ public class RateTracker implements Closeable {
      */
     RateTracker(int validTimeWindowInMils, int numBuckets, long startTime) {
         numBuckets = Math.max(numBuckets, 1);
-        _bucketSizeMillis = validTimeWindowInMils / numBuckets;
-        if (_bucketSizeMillis < 1) {
+        bucketSizeMillis = validTimeWindowInMils / numBuckets;
+        if (bucketSizeMillis < 1) {
             throw new IllegalArgumentException(
                 "validTimeWindowInMilis and numOfSildes cause each slide to have a window that is too small");
         }
-        _bucketTime = new long[numBuckets - 1];
-        _oldBuckets = new long[numBuckets - 1];
+        bucketTime = new long[numBuckets - 1];
+        oldBuckets = new long[numBuckets - 1];
 
-        _bucketStart = new AtomicLong(startTime >= 0 ? startTime : System.currentTimeMillis());
-        _currentBucket = new AtomicLong(0);
+        bucketStart = new AtomicLong(startTime >= 0 ? startTime : System.currentTimeMillis());
+        currentBucket = new AtomicLong(0);
         if (startTime < 0) {
-            _task = new Fresher();
-            MetricStatTimer._timer.scheduleAtFixedRate(_task, _bucketSizeMillis, _bucketSizeMillis);
+            task = new Fresher();
+            MetricStatTimer.timer.scheduleAtFixedRate(task, bucketSizeMillis, bucketSizeMillis);
         } else {
-            _task = null;
+            task = null;
         }
     }
 
     /**
-     * Notify the tracker upon new arrivals
+     * Notify the tracker upon new arrivals.
      *
      * @param count number of arrivals
      */
     public void notify(long count) {
-        _currentBucket.addAndGet(count);
+        currentBucket.addAndGet(count);
     }
 
     /**
+     * Get the reported rate.
      * @return the approximate average rate per second.
      */
     public synchronized double reportRate() {
@@ -85,11 +88,11 @@ public class RateTracker implements Closeable {
     }
 
     synchronized double reportRate(long currentTime) {
-        long duration = Math.max(1l, currentTime - _bucketStart.get());
-        long events = _currentBucket.get();
-        for (int i = 0; i < _oldBuckets.length; i++) {
-            events += _oldBuckets[i];
-            duration += _bucketTime[i];
+        long duration = Math.max(1L, currentTime - bucketStart.get());
+        long events = currentBucket.get();
+        for (int i = 0; i < oldBuckets.length; i++) {
+            events += oldBuckets[i];
+            duration += bucketTime[i];
         }
 
         return events * 1000.0 / duration;
@@ -97,8 +100,8 @@ public class RateTracker implements Closeable {
 
     @Override
     public void close() {
-        if (_task != null) {
-            _task.cancel();
+        if (task != null) {
+            task.cancel();
         }
     }
 
@@ -108,7 +111,7 @@ public class RateTracker implements Closeable {
      * @param numToEclipse the number of rotations to perform.
      */
     final void forceRotate(int numToEclipse, long interval) {
-        long time = _bucketStart.get();
+        long time = bucketStart.get();
         for (int i = 0; i < numToEclipse; i++) {
             time += interval;
             rotateBuckets(time);
@@ -116,15 +119,15 @@ public class RateTracker implements Closeable {
     }
 
     private synchronized void rotateBuckets(long time) {
-        long timeSpent = time - _bucketStart.getAndSet(time);
-        long currentVal = _currentBucket.getAndSet(0);
-        for (int i = 0; i < _oldBuckets.length; i++) {
-            long tmpTime = _bucketTime[i];
-            _bucketTime[i] = timeSpent;
+        long timeSpent = time - bucketStart.getAndSet(time);
+        long currentVal = currentBucket.getAndSet(0);
+        for (int i = 0; i < oldBuckets.length; i++) {
+            long tmpTime = bucketTime[i];
+            bucketTime[i] = timeSpent;
             timeSpent = tmpTime;
 
-            long cnt = _oldBuckets[i];
-            _oldBuckets[i] = currentVal;
+            long cnt = oldBuckets[i];
+            oldBuckets[i] = currentVal;
             currentVal = cnt;
         }
     }
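
Putting RateTracker together, a usage sketch (assuming the public two-argument
constructor documented above; the window sizes are arbitrary):

    // Track throughput over a 10 s window split into 10 one-second buckets.
    try (RateTracker tracker = new RateTracker(10_000, 10)) {
        tracker.notify(500);                      // 500 events just arrived
        double perSecond = tracker.reportRate();  // events * 1000.0 / elapsed ms
        System.out.println(perSecond);
    }

Because close() cancels the tracker's task on the shared timer, long-lived
trackers should be closed when their owning component shuts down.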
diff --git a/storm-client/src/jvm/org/apache/storm/multilang/BoltMsg.java b/storm-client/src/jvm/org/apache/storm/multilang/BoltMsg.java
index 5153664..956780f 100644
--- a/storm-client/src/jvm/org/apache/storm/multilang/BoltMsg.java
+++ b/storm-client/src/jvm/org/apache/storm/multilang/BoltMsg.java
@@ -18,8 +18,9 @@ import java.util.List;
  * BoltMsg is an object that represents the data sent from a shell component to a bolt process that implements a multi-language protocol. It
  * is the union of all data types that a bolt can receive from Storm.
  *
- * BoltMsgs are objects sent to the ISerializer interface, for serialization according to the wire protocol implemented by the serializer.
- * The BoltMsg class allows for a decoupling between the serialized representation of the data and the data itself.
+ * <p>BoltMsgs are objects sent to the ISerializer interface, for serialization according to the wire protocol
+ * implemented by the serializer. The BoltMsg class allows for a decoupling between the serialized representation of the
+ * data and the data itself.
  */
 public class BoltMsg {
     private String id;
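
For context, building one of these messages might look like the following; the
setters beyond setId are assumed from the protocol description here, so treat
them as illustrative rather than authoritative:

    // Hypothetical example: the data a shell bolt receives for one tuple.
    BoltMsg msg = new BoltMsg();
    msg.setId("tuple-0001");                   // id later echoed back for ack/fail
    msg.setStream("default");                  // stream the tuple arrived on
    msg.setTuple(Arrays.asList("hello", 42));  // the tuple values themselves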
diff --git a/storm-client/src/jvm/org/apache/storm/multilang/ISerializer.java b/storm-client/src/jvm/org/apache/storm/multilang/ISerializer.java
index 1ac87a8..bc7c3ac 100644
--- a/storm-client/src/jvm/org/apache/storm/multilang/ISerializer.java
+++ b/storm-client/src/jvm/org/apache/storm/multilang/ISerializer.java
@@ -27,7 +27,7 @@ import org.apache.storm.task.TopologyContext;
 public interface ISerializer extends Serializable {
 
     /**
-     * This method sets the input and output streams of the serializer
+     * This method sets the input and output streams of the serializer.
      *
      * @param processIn  output stream to non-JVM component
      * @param processOut input stream from non-JVM component
@@ -45,28 +45,28 @@ public interface ISerializer extends Serializable {
         NoOutputException;
 
     /**
-     * This method receives a shell message from the non-JVM process
+     * This method receives a shell message from the non-JVM process.
      *
      * @return shell message
      */
     ShellMsg readShellMsg() throws IOException, NoOutputException;
 
     /**
-     * This method sends a bolt message to a non-JVM bolt process
+     * This method sends a bolt message to a non-JVM bolt process.
      *
      * @param msg bolt message
      */
     void writeBoltMsg(BoltMsg msg) throws IOException;
 
     /**
-     * This method sends a spout message to a non-JVM spout process
+     * This method sends a spout message to a non-JVM spout process.
      *
      * @param msg spout message
      */
     void writeSpoutMsg(SpoutMsg msg) throws IOException;
 
     /**
-     * This method sends a list of task IDs to a non-JVM bolt process
+     * This method sends a list of task IDs to a non-JVM bolt process.
      *
      * @param taskIds list of task IDs
      */
diff --git a/storm-client/src/jvm/org/apache/storm/multilang/JsonSerializer.java b/storm-client/src/jvm/org/apache/storm/multilang/JsonSerializer.java
index 6b44d19..674466e 100644
--- a/storm-client/src/jvm/org/apache/storm/multilang/JsonSerializer.java
+++ b/storm-client/src/jvm/org/apache/storm/multilang/JsonSerializer.java
@@ -123,8 +123,8 @@ public class JsonSerializer implements ISerializer {
             shellMsg.setTask(0);
         }
 
-        Object need_task_ids = msg.get("need_task_ids");
-        if (need_task_ids == null || ((Boolean) need_task_ids).booleanValue()) {
+        Object needTaskIds = msg.get("need_task_ids");
+        if (needTaskIds == null || ((Boolean) needTaskIds).booleanValue()) {
             shellMsg.setNeedTaskIds(true);
... 21726 lines suppressed ...