Posted to commits@hudi.apache.org by le...@apache.org on 2019/12/06 04:59:35 UTC

[incubator-hudi] branch master updated: [HUDI-379] Refactor the codes based on new JavadocStyle code style rule (#1079)

This is an automated email from the ASF dual-hosted git repository.

leesf pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 2745b75  [HUDI-379] Refactor the codes based on new JavadocStyle code style rule (#1079)
2745b75 is described below

commit 2745b7552f2f2ee7a61d3ea49139ef2af3ffe13f
Author: lamber-ken <la...@163.com>
AuthorDate: Fri Dec 6 12:59:28 2019 +0800

    [HUDI-379] Refactor the codes based on new JavadocStyle code style rule (#1079)
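    [Editor's note] The rule referenced above is Checkstyle's JavadocStyle check, which (by default) requires
    the first sentence of a Javadoc comment to end with a period -- hence the many one-character "." additions
    in the diff below. As a minimal sketch only, such a rule is typically enabled inside the TreeWalker module
    of a checkstyle.xml roughly like this (property names are from Checkstyle's documentation; the exact values
    committed to style/checkstyle.xml in this change are not reproduced here and are an assumption):

        <!-- Sketch only: enables the JavadocStyle check; the values in this commit may differ. -->
        <module name="JavadocStyle">
          <!-- Require the first sentence of each Javadoc comment to end with a period. -->
          <property name="checkFirstSentence" value="true"/>
          <!-- Leave HTML and empty-Javadoc validation out of this sketch. -->
          <property name="checkHtml" value="false"/>
          <property name="checkEmptyJavadoc" value="false"/>
        </module>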
---
 .../org/apache/hudi/CompactionAdminClient.java     | 12 ++---
 .../java/org/apache/hudi/HoodieCleanClient.java    |  4 +-
 .../java/org/apache/hudi/HoodieReadClient.java     |  2 +-
 .../java/org/apache/hudi/HoodieWriteClient.java    | 24 +++++-----
 .../client/embedded/EmbeddedTimelineService.java   |  4 +-
 .../org/apache/hudi/client/utils/ClientUtils.java  |  2 +-
 .../apache/hudi/config/HoodieCompactionConfig.java |  6 +--
 .../apache/hudi/config/HoodieHBaseIndexConfig.java | 16 +++----
 .../org/apache/hudi/config/HoodieIndexConfig.java  |  2 +-
 .../org/apache/hudi/config/HoodieMemoryConfig.java |  2 +-
 .../apache/hudi/config/HoodieStorageConfig.java    |  2 +-
 .../org/apache/hudi/config/HoodieWriteConfig.java  | 24 +++++-----
 .../hudi/exception/HoodieAppendException.java      |  2 +-
 .../hudi/exception/HoodieCommitException.java      |  2 +-
 .../HoodieDependentSystemUnavailableException.java |  2 +-
 .../hudi/exception/HoodieInsertException.java      |  2 +-
 .../hudi/exception/HoodieUpsertException.java      |  2 +-
 .../apache/hudi/func/BulkInsertMapFunction.java    |  2 +-
 .../hudi/func/CopyOnWriteLazyInsertIterable.java   |  2 +-
 .../org/apache/hudi/func/LazyIterableIterator.java |  2 +-
 .../java/org/apache/hudi/index/HoodieIndex.java    |  2 +-
 .../org/apache/hudi/index/InMemoryHashIndex.java   |  2 +-
 .../hudi/index/bloom/BloomIndexFileInfo.java       |  4 +-
 .../apache/hudi/index/bloom/HoodieBloomIndex.java  |  6 +--
 .../index/bloom/HoodieBloomIndexCheckFunction.java |  2 +-
 .../hudi/index/bloom/HoodieGlobalBloomIndex.java   |  2 +-
 .../IntervalTreeBasedGlobalIndexFileFilter.java    |  2 +-
 .../bloom/IntervalTreeBasedIndexFileFilter.java    |  2 +-
 .../apache/hudi/index/bloom/KeyLookupResult.java   |  2 +-
 .../org/apache/hudi/index/bloom/KeyRangeNode.java  |  2 +-
 .../bloom/ListBasedGlobalIndexFileFilter.java      |  2 +-
 .../hudi/index/bloom/ListBasedIndexFileFilter.java |  2 +-
 .../org/apache/hudi/index/hbase/HBaseIndex.java    | 10 ++---
 .../hbase/HBaseIndexQPSResourceAllocator.java      |  4 +-
 .../org/apache/hudi/io/HoodieAppendHandle.java     |  2 +-
 .../java/org/apache/hudi/io/HoodieCleanHelper.java |  7 +--
 .../org/apache/hudi/io/HoodieCommitArchiveLog.java |  4 +-
 .../org/apache/hudi/io/HoodieCreateHandle.java     |  6 +--
 .../org/apache/hudi/io/HoodieKeyLookupHandle.java  |  2 +-
 .../java/org/apache/hudi/io/HoodieMergeHandle.java | 10 ++---
 .../org/apache/hudi/io/HoodieRangeInfoHandle.java  |  2 +-
 .../java/org/apache/hudi/io/HoodieWriteHandle.java |  8 ++--
 .../apache/hudi/io/compact/HoodieCompactor.java    |  6 +--
 .../strategy/BoundedIOCompactionStrategy.java      |  2 +-
 .../io/compact/strategy/CompactionStrategy.java    |  2 +-
 .../LogFileSizeBasedCompactionStrategy.java        |  2 +-
 .../org/apache/hudi/metrics/MetricsReporter.java   |  4 +-
 .../apache/hudi/table/HoodieCopyOnWriteTable.java  | 24 +++++-----
 .../apache/hudi/table/HoodieMergeOnReadTable.java  |  2 +-
 .../java/org/apache/hudi/table/HoodieTable.java    | 52 +++++++++++-----------
 .../org/apache/hudi/table/RollbackExecutor.java    |  8 ++--
 .../org/apache/hudi/table/RollbackRequest.java     | 14 +++---
 .../org/apache/hudi/table/WorkloadProfile.java     |  6 +--
 .../java/org/apache/hudi/TestAsyncCompaction.java  |  4 +-
 .../src/test/java/org/apache/hudi/TestCleaner.java | 46 +++++++++----------
 .../java/org/apache/hudi/TestClientRollback.java   |  8 ++--
 .../org/apache/hudi/TestCompactionAdminClient.java |  8 ++--
 .../java/org/apache/hudi/TestHoodieClientBase.java | 36 +++++++--------
 .../hudi/TestHoodieClientOnCopyOnWriteStorage.java | 52 +++++++++++-----------
 .../java/org/apache/hudi/TestHoodieReadClient.java | 20 ++++-----
 .../apache/hudi/common/HoodieClientTestUtils.java  |  2 +-
 .../hudi/common/HoodieMergeOnReadTestUtils.java    |  2 +-
 .../apache/hudi/func/TestBoundedInMemoryQueue.java |  2 +-
 .../hudi/index/bloom/TestKeyRangeLookupTree.java   |  8 ++--
 .../org/apache/hudi/io/TestHoodieMergeHandle.java  |  4 +-
 .../apache/hudi/table/TestMergeOnReadTable.java    |  4 +-
 .../common/model/EmptyHoodieRecordPayload.java     |  2 +-
 .../hudi/common/table/log/TestHoodieLogFormat.java |  2 +-
 .../com/uber/hoodie/hadoop/HoodieInputFormat.java  |  2 +-
 .../hadoop/realtime/HoodieRealtimeInputFormat.java |  2 +-
 .../hudi/hadoop/HoodieROTablePathFilter.java       |  4 +-
 .../hudi/hadoop/RecordReaderValueIterator.java     |  4 +-
 .../hudi/hadoop/UseFileSplitsFromInputFormat.java  |  2 +-
 .../hadoop/hive/HoodieCombineHiveInputFormat.java  | 12 ++---
 .../realtime/AbstractRealtimeRecordReader.java     |  6 +--
 .../realtime/HoodieParquetRealtimeInputFormat.java |  4 +-
 .../hudi/hadoop/realtime/HoodieParquetSerde.java   |  2 +-
 .../realtime/HoodieRealtimeRecordReader.java       |  2 +-
 .../realtime/RealtimeUnmergedRecordReader.java     |  2 +-
 .../hudi/hadoop/TestRecordReaderValueIterator.java |  2 +-
 .../java/org/apache/hudi/hive/HiveSyncTool.java    |  2 +-
 .../org/apache/hudi/hive/HoodieHiveClient.java     | 20 ++++-----
 .../apache/hudi/hive/NonPartitionedExtractor.java  |  2 +-
 .../org/apache/hudi/hive/SchemaDifference.java     |  2 +-
 .../java/org/apache/hudi/hive/util/SchemaUtil.java | 16 +++----
 .../org/apache/hudi/hive/TestHiveSyncTool.java     |  2 +-
 .../org/apache/hudi/hive/util/HiveTestService.java |  2 +-
 .../main/java/org/apache/hudi/BaseAvroPayload.java |  6 +--
 .../main/java/org/apache/hudi/DataSourceUtils.java |  4 +-
 .../org/apache/hudi/HoodieDataSourceHelpers.java   |  4 +-
 .../main/java/org/apache/hudi/KeyGenerator.java    |  2 +-
 .../apache/hudi/NonpartitionedKeyGenerator.java    |  2 +-
 hudi-spark/src/test/java/HoodieJavaApp.java        |  4 +-
 .../src/test/java/HoodieJavaStreamingApp.java      |  8 ++--
 .../timeline/service/FileSystemViewHandler.java    | 12 ++---
 .../hudi/timeline/service/TimelineService.java     |  2 +-
 .../timeline/service/handlers/DataFileHandler.java |  2 +-
 .../service/handlers/FileSliceHandler.java         |  2 +-
 .../timeline/service/handlers/TimelineHandler.java |  2 +-
 .../view/TestRemoteHoodieTableFileSystemView.java  |  2 +-
 .../apache/hudi/utilities/HDFSParquetImporter.java |  4 +-
 .../org/apache/hudi/utilities/HoodieCleaner.java   |  6 +--
 .../hudi/utilities/HoodieCompactionAdminTool.java  |  8 ++--
 .../org/apache/hudi/utilities/UtilHelpers.java     |  8 ++--
 .../adhoc/UpgradePayloadFromUberToApache.java      |  2 +-
 .../AbstractDeltaStreamerService.java              |  6 +--
 .../hudi/utilities/deltastreamer/Compactor.java    |  2 +-
 .../hudi/utilities/deltastreamer/DeltaSync.java    | 43 +++++++++---------
 .../deltastreamer/HoodieDeltaStreamer.java         | 28 ++++++------
 .../deltastreamer/SchedulerConfGenerator.java      |  2 +-
 .../deltastreamer/SourceFormatAdapter.java         |  2 +-
 .../keygen/TimestampBasedKeyGenerator.java         |  2 +-
 .../utilities/schema/FilebasedSchemaProvider.java  |  4 +-
 .../hudi/utilities/schema/SchemaProvider.java      |  2 +-
 .../utilities/schema/SchemaRegistryProvider.java   |  4 +-
 .../hudi/utilities/sources/AvroDFSSource.java      |  2 +-
 .../hudi/utilities/sources/AvroKafkaSource.java    |  2 +-
 .../hudi/utilities/sources/HiveIncrPullSource.java |  2 +-
 .../hudi/utilities/sources/HoodieIncrSource.java   | 12 ++---
 .../hudi/utilities/sources/JsonDFSSource.java      |  2 +-
 .../hudi/utilities/sources/JsonKafkaSource.java    |  2 +-
 .../hudi/utilities/sources/ParquetDFSSource.java   |  2 +-
 .../org/apache/hudi/utilities/sources/Source.java  |  2 +-
 .../utilities/sources/helpers/AvroConvertor.java   |  6 +--
 .../utilities/sources/helpers/DFSPathSelector.java |  2 +-
 .../sources/helpers/IncrSourceHelper.java          |  6 +--
 .../utilities/sources/helpers/KafkaOffsetGen.java  |  4 +-
 .../utilities/transform/FlatteningTransformer.java |  4 +-
 .../utilities/transform/IdentityTransformer.java   |  2 +-
 .../transform/SqlQueryBasedTransformer.java        |  2 +-
 .../hudi/utilities/transform/Transformer.java      |  4 +-
 .../hudi/utilities/TestHoodieDeltaStreamer.java    |  8 ++--
 .../apache/hudi/utilities/UtilitiesTestBase.java   |  4 +-
 .../hudi/utilities/sources/TestDFSSource.java      |  2 +-
 .../hudi/utilities/sources/TestKafkaSource.java    |  2 +-
 .../utilities/sources/config/TestSourceConfig.java |  2 +-
 style/checkstyle.xml                               |  5 +--
 137 files changed, 434 insertions(+), 433 deletions(-)

diff --git a/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java b/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java
index 6034408..76b6631 100644
--- a/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java
@@ -61,7 +61,7 @@ import java.util.stream.Collectors;
 import static org.apache.hudi.common.table.HoodieTimeline.COMPACTION_ACTION;
 
 /**
- * Client to perform admin operations related to compaction
+ * Client to perform admin operations related to compaction.
  */
 public class CompactionAdminClient extends AbstractHoodieClient {
 
@@ -214,7 +214,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
   }
 
   /**
-   * Construction Compaction Plan from compaction instant
+   * Construction Compaction Plan from compaction instant.
    */
   private static HoodieCompactionPlan getCompactionPlan(HoodieTableMetaClient metaClient, String compactionInstant)
       throws IOException {
@@ -273,7 +273,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
   }
 
   /**
-   * Check if a compaction operation is valid
+   * Check if a compaction operation is valid.
    *
    * @param metaClient Hoodie Table Meta client
    * @param compactionInstant Compaction Instant
@@ -342,7 +342,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
   }
 
   /**
-   * Execute Renaming operation
+   * Execute Renaming operation.
    *
    * @param metaClient HoodieTable MetaClient
    * @param renameActions List of rename operations
@@ -484,7 +484,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
   }
 
   /**
-   * Holds Operation result for Renaming
+   * Holds Operation result for Renaming.
    */
   public static class RenameOpResult extends OperationResult<RenameInfo> {
 
@@ -505,7 +505,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
   }
 
   /**
-   * Holds Operation result for Renaming
+   * Holds Operation result for Renaming.
    */
   public static class ValidationOpResult extends OperationResult<CompactionOperation> {
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
index e08ecfb..c35a0b4 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
@@ -103,7 +103,7 @@ public class HoodieCleanClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Creates a Cleaner plan if there are files to be cleaned and stores them in instant file
+   * Creates a Cleaner plan if there are files to be cleaned and stores them in instant file.
    *
    * @param startCleanTime Cleaner Instant Time
    * @return Cleaner Plan if generated
@@ -133,7 +133,7 @@ public class HoodieCleanClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Executes the Cleaner plan stored in the instant metadata
+   * Executes the Cleaner plan stored in the instant metadata.
    *
    * @param table Hoodie Table
    * @param cleanInstantTs Cleaner Instant Timestamp
diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieReadClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieReadClient.java
index 16f79fb..3c4290c 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieReadClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieReadClient.java
@@ -145,7 +145,7 @@ public class HoodieReadClient<T extends HoodieRecordPayload> extends AbstractHoo
   }
 
   /**
-   * Given a bunch of hoodie keys, fetches all the individual records out as a data frame
+   * Given a bunch of hoodie keys, fetches all the individual records out as a data frame.
    *
    * @return a dataframe
    */
diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
index 3ccba7d..4fdcc1f 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
@@ -159,7 +159,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Upserts a bunch of new records into the Hoodie table, at the supplied commitTime
+   * Upserts a bunch of new records into the Hoodie table, at the supplied commitTime.
    */
   public JavaRDD<WriteStatus> upsert(JavaRDD<HoodieRecord<T>> records, final String commitTime) {
     HoodieTable<T> table = getTableAndInitCtx(OperationType.UPSERT);
@@ -505,14 +505,14 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Commit changes performed at the given commitTime marker
+   * Commit changes performed at the given commitTime marker.
    */
   public boolean commit(String commitTime, JavaRDD<WriteStatus> writeStatuses) {
     return commit(commitTime, writeStatuses, Option.empty());
   }
 
   /**
-   * Commit changes performed at the given commitTime marker
+   * Commit changes performed at the given commitTime marker.
    */
   public boolean commit(String commitTime, JavaRDD<WriteStatus> writeStatuses,
       Option<Map<String, String>> extraMetadata) {
@@ -988,7 +988,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Provides a new commit time for a write operation (insert/update)
+   * Provides a new commit time for a write operation (insert/update).
    */
   public String startCommit() {
     // NOTE : Need to ensure that rollback is done before a new commit is started
@@ -1027,7 +1027,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Schedules a new compaction instant
+   * Schedules a new compaction instant.
    */
   public Option<String> scheduleCompaction(Option<Map<String, String>> extraMetadata) throws IOException {
     String instantTime = HoodieActiveTimeline.createNewCommitTime();
@@ -1037,7 +1037,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Schedules a new compaction instant with passed-in instant time
+   * Schedules a new compaction instant with passed-in instant time.
    *
    * @param instantTime Compaction Instant Time
    * @param extraMetadata Extra Metadata to be stored
@@ -1074,7 +1074,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Performs Compaction for the workload stored in instant-time
+   * Performs Compaction for the workload stored in instant-time.
    *
    * @param compactionInstantTime Compaction Instant Time
    */
@@ -1141,7 +1141,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Cleanup all inflight commits
+   * Cleanup all inflight commits.
    */
   private void rollbackInflightCommits() {
     HoodieTable<T> table = HoodieTable.getHoodieTable(createMetaClient(true), config, jsc);
@@ -1197,7 +1197,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
    */
 
   /**
-   * Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time
+   * Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time.
    *
    * @param compactionInstantTime Compaction Instant Time
    */
@@ -1226,7 +1226,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Perform compaction operations as specified in the compaction commit file
+   * Perform compaction operations as specified in the compaction commit file.
    *
    * @param compactionInstant Compacton Instant time
    * @param activeTimeline Active Timeline
@@ -1254,7 +1254,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Commit Compaction and track metrics
+   * Commit Compaction and track metrics.
    *
    * @param compactedStatuses Compaction Write status
    * @param table Hoodie Table
@@ -1404,7 +1404,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
   }
 
   /**
-   * Refers to different operation types
+   * Refers to different operation types.
    */
   enum OperationType {
     INSERT,
diff --git a/hudi-client/src/main/java/org/apache/hudi/client/embedded/EmbeddedTimelineService.java b/hudi-client/src/main/java/org/apache/hudi/client/embedded/EmbeddedTimelineService.java
index 54aef32..d743aa7 100644
--- a/hudi-client/src/main/java/org/apache/hudi/client/embedded/EmbeddedTimelineService.java
+++ b/hudi-client/src/main/java/org/apache/hudi/client/embedded/EmbeddedTimelineService.java
@@ -33,7 +33,7 @@ import org.apache.spark.SparkConf;
 import java.io.IOException;
 
 /**
- * Timeline Service that runs as part of write client
+ * Timeline Service that runs as part of write client.
  */
 public class EmbeddedTimelineService {
 
@@ -86,7 +86,7 @@ public class EmbeddedTimelineService {
   }
 
   /**
-   * Retrieves proper view storage configs for remote clients to access this service
+   * Retrieves proper view storage configs for remote clients to access this service.
    */
   public FileSystemViewStorageConfig getRemoteFileSystemViewConfig() {
     return FileSystemViewStorageConfig.newBuilder().withStorageType(FileSystemViewStorageType.REMOTE_FIRST)
diff --git a/hudi-client/src/main/java/org/apache/hudi/client/utils/ClientUtils.java b/hudi-client/src/main/java/org/apache/hudi/client/utils/ClientUtils.java
index fe2998e..1249992 100644
--- a/hudi-client/src/main/java/org/apache/hudi/client/utils/ClientUtils.java
+++ b/hudi-client/src/main/java/org/apache/hudi/client/utils/ClientUtils.java
@@ -26,7 +26,7 @@ import org.apache.spark.api.java.JavaSparkContext;
 public class ClientUtils {
 
   /**
-   * Create Consistency Aware MetaClient
+   * Create Consistency Aware MetaClient.
    *
    * @param jsc JavaSparkContext
    * @param config HoodieWriteConfig
diff --git a/hudi-client/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java b/hudi-client/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
index 2f1c3e7..4ecc87f 100644
--- a/hudi-client/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
+++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
@@ -33,7 +33,7 @@ import java.io.IOException;
 import java.util.Properties;
 
 /**
- * Compaction related config
+ * Compaction related config.
  */
 @Immutable
 public class HoodieCompactionConfig extends DefaultHoodieConfig {
@@ -55,8 +55,8 @@ public class HoodieCompactionConfig extends DefaultHoodieConfig {
   // By default, treat any file <= 100MB as a small file.
   public static final String DEFAULT_PARQUET_SMALL_FILE_LIMIT_BYTES = String.valueOf(104857600);
   /**
-   * Configs related to specific table types
-   **/
+   * Configs related to specific table types.
+   */
   // Number of inserts, that will be put each partition/bucket for writing
   public static final String COPY_ON_WRITE_TABLE_INSERT_SPLIT_SIZE = "hoodie.copyonwrite.insert" + ".split.size";
   // The rationale to pick the insert parallelism is the following. Writing out 100MB files,
diff --git a/hudi-client/src/main/java/org/apache/hudi/config/HoodieHBaseIndexConfig.java b/hudi-client/src/main/java/org/apache/hudi/config/HoodieHBaseIndexConfig.java
index 9fd32cb..73dfabd 100644
--- a/hudi-client/src/main/java/org/apache/hudi/config/HoodieHBaseIndexConfig.java
+++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieHBaseIndexConfig.java
@@ -34,17 +34,17 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
   public static final String HBASE_ZK_ZNODEPARENT = "hoodie.index.hbase.zknode.path";
   /**
    * Note that if HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP is set to true, this batch size will not be honored for HBase
-   * Puts
+   * Puts.
    */
   public static final String HBASE_PUT_BATCH_SIZE_PROP = "hoodie.index.hbase.put.batch.size";
 
   /**
-   * Property to set which implementation of HBase QPS resource allocator to be used
+   * Property to set which implementation of HBase QPS resource allocator to be used.
    */
   public static final String HBASE_INDEX_QPS_ALLOCATOR_CLASS = "hoodie.index.hbase.qps.allocator.class";
   public static final String DEFAULT_HBASE_INDEX_QPS_ALLOCATOR_CLASS = DefaultHBaseQPSResourceAllocator.class.getName();
   /**
-   * Property to set to enable auto computation of put batch size
+   * Property to set to enable auto computation of put batch size.
    */
   public static final String HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP = "hoodie.index.hbase.put.batch.size.autocompute";
   public static final String DEFAULT_HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE = "false";
@@ -62,7 +62,7 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
    */
   public static String HBASE_MAX_QPS_PER_REGION_SERVER_PROP = "hoodie.index.hbase.max.qps.per.region.server";
   /**
-   * Default batch size, used only for Get, but computed for Put
+   * Default batch size, used only for Get, but computed for Put.
    */
   public static final int DEFAULT_HBASE_BATCH_SIZE = 100;
   /**
@@ -70,17 +70,17 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
    */
   public static final int DEFAULT_HBASE_MAX_QPS_PER_REGION_SERVER = 1000;
   /**
-   * Default is 50%, which means a total of 2 jobs can run using HbaseIndex without overwhelming Region Servers
+   * Default is 50%, which means a total of 2 jobs can run using HbaseIndex without overwhelming Region Servers.
    */
   public static final float DEFAULT_HBASE_QPS_FRACTION = 0.5f;
 
   /**
-   * Property to decide if HBASE_QPS_FRACTION_PROP is dynamically calculated based on volume
+   * Property to decide if HBASE_QPS_FRACTION_PROP is dynamically calculated based on volume.
    */
   public static final String HOODIE_INDEX_COMPUTE_QPS_DYNAMICALLY = "hoodie.index.hbase.dynamic_qps";
   public static final boolean DEFAULT_HOODIE_INDEX_COMPUTE_QPS_DYNAMICALLY = false;
   /**
-   * Min and Max for HBASE_QPS_FRACTION_PROP to stabilize skewed volume workloads
+   * Min and Max for HBASE_QPS_FRACTION_PROP to stabilize skewed volume workloads.
    */
   public static final String HBASE_MIN_QPS_FRACTION_PROP = "hoodie.index.hbase.min.qps.fraction";
   public static final String DEFAULT_HBASE_MIN_QPS_FRACTION_PROP = "0.002";
@@ -88,7 +88,7 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
   public static final String HBASE_MAX_QPS_FRACTION_PROP = "hoodie.index.hbase.max.qps.fraction";
   public static final String DEFAULT_HBASE_MAX_QPS_FRACTION_PROP = "0.06";
   /**
-   * Hoodie index desired puts operation time in seconds
+   * Hoodie index desired puts operation time in seconds.
    */
   public static final String HOODIE_INDEX_DESIRED_PUTS_TIME_IN_SECS = "hoodie.index.hbase.desired_puts_time_in_secs";
   public static final int DEFAULT_HOODIE_INDEX_DESIRED_PUTS_TIME_IN_SECS = 600;
diff --git a/hudi-client/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java b/hudi-client/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
index de98955..abba272 100644
--- a/hudi-client/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
+++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
@@ -29,7 +29,7 @@ import java.util.Properties;
 
 
 /**
- * Indexing related config
+ * Indexing related config.
  */
 @Immutable
 public class HoodieIndexConfig extends DefaultHoodieConfig {
diff --git a/hudi-client/src/main/java/org/apache/hudi/config/HoodieMemoryConfig.java b/hudi-client/src/main/java/org/apache/hudi/config/HoodieMemoryConfig.java
index 43b28cc..f19a64c 100644
--- a/hudi-client/src/main/java/org/apache/hudi/config/HoodieMemoryConfig.java
+++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieMemoryConfig.java
@@ -29,7 +29,7 @@ import java.io.IOException;
 import java.util.Properties;
 
 /**
- * Memory related config
+ * Memory related config.
  */
 @Immutable
 public class HoodieMemoryConfig extends DefaultHoodieConfig {
diff --git a/hudi-client/src/main/java/org/apache/hudi/config/HoodieStorageConfig.java b/hudi-client/src/main/java/org/apache/hudi/config/HoodieStorageConfig.java
index f9c98c7..24cf190 100644
--- a/hudi-client/src/main/java/org/apache/hudi/config/HoodieStorageConfig.java
+++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieStorageConfig.java
@@ -26,7 +26,7 @@ import java.io.IOException;
 import java.util.Properties;
 
 /**
- * Storage related config
+ * Storage related config.
  */
 @Immutable
 public class HoodieStorageConfig extends DefaultHoodieConfig {
diff --git a/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java b/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
index 3743116..3c2563d 100644
--- a/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
+++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
@@ -42,7 +42,7 @@ import java.util.Map;
 import java.util.Properties;
 
 /**
- * Class storing configs for the {@link HoodieWriteClient}
+ * Class storing configs for the {@link HoodieWriteClient}.
  */
 @Immutable
 public class HoodieWriteConfig extends DefaultHoodieConfig {
@@ -115,8 +115,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
   }
 
   /**
-   * base properties
-   **/
+   * base properties.
+   */
   public String getBasePath() {
     return props.getProperty(BASE_PATH_PROP);
   }
@@ -210,8 +210,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
   }
 
   /**
-   * compaction properties
-   **/
+   * compaction properties.
+   */
   public HoodieCleaningPolicy getCleanerPolicy() {
     return HoodieCleaningPolicy.valueOf(props.getProperty(HoodieCompactionConfig.CLEANER_POLICY_PROP));
   }
@@ -297,8 +297,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
   }
 
   /**
-   * index properties
-   **/
+   * index properties.
+   */
   public HoodieIndex.IndexType getIndexType() {
     return HoodieIndex.IndexType.valueOf(props.getProperty(HoodieIndexConfig.INDEX_TYPE_PROP));
   }
@@ -417,8 +417,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
   }
 
   /**
-   * storage properties
-   **/
+   * storage properties.
+   */
   public long getParquetMaxFileSize() {
     return Long.parseLong(props.getProperty(HoodieStorageConfig.PARQUET_FILE_MAX_BYTES));
   }
@@ -452,8 +452,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
   }
 
   /**
-   * metrics properties
-   **/
+   * metrics properties.
+   */
   public boolean isMetricsOn() {
     return Boolean.parseBoolean(props.getProperty(HoodieMetricsConfig.METRICS_ON));
   }
@@ -483,7 +483,7 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
   }
 
   /**
-   * memory configs
+   * memory configs.
    */
   public Double getMaxMemoryFractionPerPartitionMerge() {
     return Double.valueOf(props.getProperty(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_MERGE_PROP));
diff --git a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieAppendException.java b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieAppendException.java
index e603569..ffe99cc 100644
--- a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieAppendException.java
+++ b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieAppendException.java
@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
 
 /**
  * <p>
- * Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a delta commit
+ * Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a delta commit.
  * </p>
  */
 public class HoodieAppendException extends HoodieException {
diff --git a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieCommitException.java b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieCommitException.java
index 3fb15a6..b26f63e 100644
--- a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieCommitException.java
+++ b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieCommitException.java
@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
 
 /**
  * <p>
- * Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a Commit
+ * Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a Commit.
  * </p>
  */
 public class HoodieCommitException extends HoodieException {
diff --git a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieDependentSystemUnavailableException.java b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieDependentSystemUnavailableException.java
index 76d679f..4c83ebc 100644
--- a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieDependentSystemUnavailableException.java
+++ b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieDependentSystemUnavailableException.java
@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
 
 /**
  * <p>
- * Exception thrown when dependent system is not available
+ * Exception thrown when dependent system is not available.
  * </p>
  */
 public class HoodieDependentSystemUnavailableException extends HoodieException {
diff --git a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieInsertException.java b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieInsertException.java
index 37995bf..fc014ad 100644
--- a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieInsertException.java
+++ b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieInsertException.java
@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
 
 /**
  * <p>
- * Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a bulk insert
+ * Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a bulk insert.
  * </p>
  */
 public class HoodieInsertException extends HoodieException {
diff --git a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieUpsertException.java b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieUpsertException.java
index 062ef67..8042721 100644
--- a/hudi-client/src/main/java/org/apache/hudi/exception/HoodieUpsertException.java
+++ b/hudi-client/src/main/java/org/apache/hudi/exception/HoodieUpsertException.java
@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
 
 /**
  * <p>
- * Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a incremental upsert
+ * Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a incremental upsert.
  * </p>
  */
 public class HoodieUpsertException extends HoodieException {
diff --git a/hudi-client/src/main/java/org/apache/hudi/func/BulkInsertMapFunction.java b/hudi-client/src/main/java/org/apache/hudi/func/BulkInsertMapFunction.java
index c40bb2a..1386eef 100644
--- a/hudi-client/src/main/java/org/apache/hudi/func/BulkInsertMapFunction.java
+++ b/hudi-client/src/main/java/org/apache/hudi/func/BulkInsertMapFunction.java
@@ -30,7 +30,7 @@ import java.util.Iterator;
 import java.util.List;
 
 /**
- * Map function that handles a sorted stream of HoodieRecords
+ * Map function that handles a sorted stream of HoodieRecords.
  */
 public class BulkInsertMapFunction<T extends HoodieRecordPayload>
     implements Function2<Integer, Iterator<HoodieRecord<T>>, Iterator<List<WriteStatus>>> {
diff --git a/hudi-client/src/main/java/org/apache/hudi/func/CopyOnWriteLazyInsertIterable.java b/hudi-client/src/main/java/org/apache/hudi/func/CopyOnWriteLazyInsertIterable.java
index 8e1c191..4d526f4 100644
--- a/hudi-client/src/main/java/org/apache/hudi/func/CopyOnWriteLazyInsertIterable.java
+++ b/hudi-client/src/main/java/org/apache/hudi/func/CopyOnWriteLazyInsertIterable.java
@@ -122,7 +122,7 @@ public class CopyOnWriteLazyInsertIterable<T extends HoodieRecordPayload>
   }
 
   /**
-   * Consumes stream of hoodie records from in-memory queue and writes to one or more create-handles
+   * Consumes stream of hoodie records from in-memory queue and writes to one or more create-handles.
    */
   protected class CopyOnWriteInsertHandler
       extends BoundedInMemoryQueueConsumer<HoodieInsertValueGenResult<HoodieRecord>, List<WriteStatus>> {
diff --git a/hudi-client/src/main/java/org/apache/hudi/func/LazyIterableIterator.java b/hudi-client/src/main/java/org/apache/hudi/func/LazyIterableIterator.java
index ec05b85..fe5bfbc 100644
--- a/hudi-client/src/main/java/org/apache/hudi/func/LazyIterableIterator.java
+++ b/hudi-client/src/main/java/org/apache/hudi/func/LazyIterableIterator.java
@@ -43,7 +43,7 @@ public abstract class LazyIterableIterator<I, O> implements Iterable<O>, Iterato
   }
 
   /**
-   * Called once, before any elements are processed
+   * Called once, before any elements are processed.
    */
   protected abstract void start();
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/HoodieIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/HoodieIndex.java
index 235b3fb..e4baed3 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/HoodieIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/HoodieIndex.java
@@ -39,7 +39,7 @@ import org.apache.spark.api.java.JavaSparkContext;
 import java.io.Serializable;
 
 /**
- * Base class for different types of indexes to determine the mapping from uuid
+ * Base class for different types of indexes to determine the mapping from uuid.
  */
 public abstract class HoodieIndex<T extends HoodieRecordPayload> implements Serializable {
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/InMemoryHashIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/InMemoryHashIndex.java
index a8cd6e5..ecfced3 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/InMemoryHashIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/InMemoryHashIndex.java
@@ -99,7 +99,7 @@ public class InMemoryHashIndex<T extends HoodieRecordPayload> extends HoodieInde
   }
 
   /**
-   * Only looks up by recordKey
+   * Only looks up by recordKey.
    */
   @Override
   public boolean isGlobal() {
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java
index 3a51cb9..51e1b76 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java
@@ -23,7 +23,7 @@ import com.google.common.base.Objects;
 import java.io.Serializable;
 
 /**
- * Metadata about a given file group, useful for index lookup
+ * Metadata about a given file group, useful for index lookup.
  */
 public class BloomIndexFileInfo implements Serializable {
 
@@ -62,7 +62,7 @@ public class BloomIndexFileInfo implements Serializable {
   }
 
   /**
-   * Does the given key fall within the range (inclusive)
+   * Does the given key fall within the range (inclusive).
    */
   public boolean isKeyInRange(String recordKey) {
     return minRecordKey.compareTo(recordKey) <= 0 && maxRecordKey.compareTo(recordKey) >= 0;
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
index aec8cc3..0ffdf11 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
@@ -141,7 +141,7 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
 
   /**
    * Lookup the location for each record key and return the pair<record_key,location> for all record keys already
-   * present and drop the record keys if not present
+   * present and drop the record keys if not present.
    */
   private JavaPairRDD<HoodieKey, HoodieRecordLocation> lookupIndex(
       JavaPairRDD<String, String> partitionRecordKeyPairRDD, final JavaSparkContext jsc,
@@ -167,7 +167,7 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
   }
 
   /**
-   * Compute the estimated number of bloom filter comparisons to be performed on each file group
+   * Compute the estimated number of bloom filter comparisons to be performed on each file group.
    */
   private Map<String, Long> computeComparisonsPerFileGroup(final Map<String, Long> recordsPerPartition,
       final Map<String, List<BloomIndexFileInfo>> partitionToFileInfo,
@@ -278,7 +278,7 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
   }
 
   /**
-   * This is not global, since we depend on the partitionPath to do the lookup
+   * This is not global, since we depend on the partitionPath to do the lookup.
    */
   @Override
   public boolean isGlobal() {
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndexCheckFunction.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndexCheckFunction.java
index 2a289fe..2881ce0 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndexCheckFunction.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndexCheckFunction.java
@@ -37,7 +37,7 @@ import java.util.List;
 import scala.Tuple2;
 
 /**
- * Function performing actual checking of RDD partition containing (fileId, hoodieKeys) against the actual files
+ * Function performing actual checking of RDD partition containing (fileId, hoodieKeys) against the actual files.
  */
 public class HoodieBloomIndexCheckFunction
     implements Function2<Integer, Iterator<Tuple2<String, HoodieKey>>, Iterator<List<KeyLookupResult>>> {
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
index ec308c6..8db45e0 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
@@ -106,7 +106,7 @@ public class HoodieGlobalBloomIndex<T extends HoodieRecordPayload> extends Hoodi
 
 
   /**
-   * Tagging for global index should only consider the record key
+   * Tagging for global index should only consider the record key.
    */
   @Override
   protected JavaRDD<HoodieRecord<T>> tagLocationBacktoRecords(
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/IntervalTreeBasedGlobalIndexFileFilter.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/IntervalTreeBasedGlobalIndexFileFilter.java
index 4e269d7..f3001ea 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/IntervalTreeBasedGlobalIndexFileFilter.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/IntervalTreeBasedGlobalIndexFileFilter.java
@@ -36,7 +36,7 @@ class IntervalTreeBasedGlobalIndexFileFilter implements IndexFileFilter {
   private final Set<String> filesWithNoRanges = new HashSet<>();
 
   /**
-   * Instantiates {@link IntervalTreeBasedGlobalIndexFileFilter}
+   * Instantiates {@link IntervalTreeBasedGlobalIndexFileFilter}.
    *
    * @param partitionToFileIndexInfo Map of partition to List of {@link BloomIndexFileInfo}s
    */
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/IntervalTreeBasedIndexFileFilter.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/IntervalTreeBasedIndexFileFilter.java
index 9737772..849cdc6 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/IntervalTreeBasedIndexFileFilter.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/IntervalTreeBasedIndexFileFilter.java
@@ -35,7 +35,7 @@ class IntervalTreeBasedIndexFileFilter implements IndexFileFilter {
   private final Map<String, Set<String>> partitionToFilesWithNoRanges = new HashMap<>();
 
   /**
-   * Instantiates {@link IntervalTreeBasedIndexFileFilter}
+   * Instantiates {@link IntervalTreeBasedIndexFileFilter}.
    *
    * @param partitionToFileIndexInfo Map of partition to List of {@link BloomIndexFileInfo}s
    */
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/KeyLookupResult.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/KeyLookupResult.java
index 631ff76..191cfd3 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/KeyLookupResult.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/KeyLookupResult.java
@@ -21,7 +21,7 @@ package org.apache.hudi.index.bloom;
 import java.util.List;
 
 /**
- * Encapsulates the result from a key lookup
+ * Encapsulates the result from a key lookup.
  */
 public class KeyLookupResult {
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/KeyRangeNode.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/KeyRangeNode.java
index 6594981..df2e971 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/KeyRangeNode.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/KeyRangeNode.java
@@ -39,7 +39,7 @@ class KeyRangeNode implements Comparable<KeyRangeNode>, Serializable {
   private KeyRangeNode right = null;
 
   /**
-   * Instantiates a new {@link KeyRangeNode}
+   * Instantiates a new {@link KeyRangeNode}.
    *
    * @param minRecordKey min record key of the index file
    * @param maxRecordKey max record key of the index file
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/ListBasedGlobalIndexFileFilter.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/ListBasedGlobalIndexFileFilter.java
index d5fe4f6..1f3e04e 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/ListBasedGlobalIndexFileFilter.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/ListBasedGlobalIndexFileFilter.java
@@ -26,7 +26,7 @@ import java.util.Set;
 class ListBasedGlobalIndexFileFilter extends ListBasedIndexFileFilter {
 
   /**
-   * Instantiates {@link ListBasedGlobalIndexFileFilter}
+   * Instantiates {@link ListBasedGlobalIndexFileFilter}.
    *
    * @param partitionToFileIndexInfo Map of partition to List of {@link BloomIndexFileInfo}
    */
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/ListBasedIndexFileFilter.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/ListBasedIndexFileFilter.java
index 9673209..b9a6ce3 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/ListBasedIndexFileFilter.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/ListBasedIndexFileFilter.java
@@ -32,7 +32,7 @@ class ListBasedIndexFileFilter implements IndexFileFilter {
   final Map<String, List<BloomIndexFileInfo>> partitionToFileIndexInfo;
 
   /**
-   * Instantiates {@link ListBasedIndexFileFilter}
+   * Instantiates {@link ListBasedIndexFileFilter}.
    *
    * @param partitionToFileIndexInfo Map of partition to List of {@link BloomIndexFileInfo}
    */
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
index beb5b4a..87d5557 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
@@ -67,7 +67,7 @@ import java.util.List;
 import scala.Tuple2;
 
 /**
- * Hoodie Index implementation backed by HBase
+ * Hoodie Index implementation backed by HBase.
  */
 public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
 
@@ -89,7 +89,7 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
   private int maxQpsPerRegionServer;
   /**
    * multiPutBatchSize will be computed and re-set in updateLocation if
-   * {@link HoodieIndexConfig.HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP} is set to true
+   * {@link HoodieIndexConfig.HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP} is set to true.
    */
   private Integer multiPutBatchSize;
   private Integer numRegionServersForTable;
@@ -150,7 +150,7 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
 
   /**
    * Since we are sharing the HbaseConnection across tasks in a JVM, make sure the HbaseConnectio is closed when JVM
-   * exits
+   * exits.
    */
   private void addShutDownHook() {
     Runtime.getRuntime().addShutdownHook(new Thread() {
@@ -342,7 +342,7 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
   }
 
   /**
-   * Helper method to facilitate performing puts and deletes in Hbase
+   * Helper method to facilitate performing puts and deletes in Hbase.
    */
   private void doPutsAndDeletes(HTable hTable, List<Put> puts, List<Delete> deletes) throws IOException {
     if (puts.size() > 0) {
@@ -500,7 +500,7 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
   }
 
   /**
-   * Only looks up by recordKey
+   * Only looks up by recordKey.
    */
   @Override
   public boolean isGlobal() {
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndexQPSResourceAllocator.java b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndexQPSResourceAllocator.java
index bccdd5b..bb876b2 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndexQPSResourceAllocator.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndexQPSResourceAllocator.java
@@ -21,7 +21,7 @@ package org.apache.hudi.index.hbase;
 import java.io.Serializable;
 
 /**
- * <code>HBaseIndexQPSResourceAllocator</code> defines methods to manage resource allocation for HBase index operations
+ * <code>HBaseIndexQPSResourceAllocator</code> defines methods to manage resource allocation for HBase index operations.
  */
 public interface HBaseIndexQPSResourceAllocator extends Serializable {
 
@@ -45,7 +45,7 @@ public interface HBaseIndexQPSResourceAllocator extends Serializable {
   float acquireQPSResources(final float desiredQPSFraction, final long numPuts);
 
   /**
-   * This method releases the acquired QPS Fraction
+   * This method releases the acquired QPS Fraction.
    */
   void releaseQPSResources();
 }
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java
index 62c9e6b..712702a 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java
@@ -301,7 +301,7 @@ public class HoodieAppendHandle<T extends HoodieRecordPayload> extends HoodieWri
   }
 
   /**
-   * Checks if the number of records have reached the set threshold and then flushes the records to disk
+   * Checks if the number of records have reached the set threshold and then flushes the records to disk.
    */
   private void flushToDiskIfRequired(HoodieRecord record) {
     // Append if max number of records reached to achieve block size
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCleanHelper.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCleanHelper.java
index c79b39f..eaaf72f 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCleanHelper.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCleanHelper.java
@@ -51,7 +51,7 @@ import java.util.Map;
 import java.util.stream.Collectors;
 
 /**
- * Cleaner is responsible for garbage collecting older files in a given partition path, such that
+ * Cleaner is responsible for garbage collecting older files in a given partition path. Such that
  * <p>
  * 1) It provides sufficient time for existing queries running on older versions, to close
  * <p>
@@ -83,7 +83,8 @@ public class HoodieCleanHelper<T extends HoodieRecordPayload<T>> implements Seri
   }
 
   /**
-   * Returns list of partitions where clean operations needs to be performed
+   * Returns list of partitions where clean operations needs to be performed.
+   *
    * @param newInstantToRetain New instant to be retained after this cleanup operation
    * @return list of partitions to scan for cleaning
    * @throws IOException when underlying file-system throws this exception
@@ -294,7 +295,7 @@ public class HoodieCleanHelper<T extends HoodieRecordPayload<T>> implements Seri
   }
 
   /**
-   * Determine if file slice needed to be preserved for pending compaction
+   * Determine if file slice needed to be preserved for pending compaction.
    * 
    * @param fileSlice File Slice
    * @return true if file slice needs to be preserved, false otherwise.
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCommitArchiveLog.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCommitArchiveLog.java
index c692949..e7e0cfe 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCommitArchiveLog.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCommitArchiveLog.java
@@ -64,7 +64,7 @@ import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 /**
- * Archiver to bound the growth of <action>.commit files
+ * Archiver to bound the growth of <action>.commit files.
  */
 public class HoodieCommitArchiveLog {
 
@@ -201,7 +201,7 @@ public class HoodieCommitArchiveLog {
   }
 
   /**
-   * Remove older instants from auxiliary meta folder
+   * Remove older instants from auxiliary meta folder.
    *
    * @param thresholdInstant Hoodie Instant
    * @return success if all eligible file deleted successfully
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java
index 06601b1..c476908 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java
@@ -77,7 +77,7 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri
   }
 
   /**
-   * Called by the compactor code path
+   * Called by the compactor code path.
    */
   public HoodieCreateHandle(HoodieWriteConfig config, String commitTime, HoodieTable<T> hoodieTable,
       String partitionPath, String fileId, Iterator<HoodieRecord<T>> recordIterator) {
@@ -124,7 +124,7 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri
   }
 
   /**
-   * Writes all records passed
+   * Writes all records passed.
    */
   public void write() {
     try {
@@ -147,7 +147,7 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri
   }
 
   /**
-   * Performs actions to durably, persist the current changes and returns a WriteStatus object
+   * Performs actions to durably, persist the current changes and returns a WriteStatus object.
    */
   @Override
   public WriteStatus close() {
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java
index 08e9802..d2cde54 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java
@@ -124,7 +124,7 @@ public class HoodieKeyLookupHandle<T extends HoodieRecordPayload> extends Hoodie
   }
 
   /**
-   * Encapsulates the result from a key lookup
+   * Encapsulates the result from a key lookup.
    */
   public static class KeyLookupResult {
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java
index 800c893..075be1a 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java
@@ -77,7 +77,7 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload> extends HoodieWrit
   }
 
   /**
-   * Called by compactor code path
+   * Called by compactor code path.
    */
   public HoodieMergeHandle(HoodieWriteConfig config, String commitTime, HoodieTable<T> hoodieTable,
       Map<String, HoodieRecord<T>> keyToNewRecords, String fileId, HoodieDataFile dataFileToBeMerged) {
@@ -108,7 +108,7 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload> extends HoodieWrit
   }
 
   /**
-   * Determines whether we can accept the incoming records, into the current file, depending on
+   * Determines whether we can accept the incoming records, into the current file. Depending on
    * <p>
    * - Whether it belongs to the same partitionPath as existing records - Whether the current file written bytes lt max
    * file size
@@ -139,14 +139,14 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload> extends HoodieWrit
   }
 
   /**
-   * Rewrite the GenericRecord with the Schema containing the Hoodie Metadata fields
+   * Rewrite the GenericRecord with the Schema containing the Hoodie Metadata fields.
    */
   protected GenericRecord rewriteRecord(GenericRecord record) {
     return HoodieAvroUtils.rewriteRecord(record, writerSchema);
   }
 
   /**
-   * Extract old file path, initialize StorageWriter and WriteStatus
+   * Extract old file path, initialize StorageWriter and WriteStatus.
    */
   private void init(String fileId, String partitionPath, HoodieDataFile dataFileToBeMerged) {
     logger.info("partitionPath:" + partitionPath + ", fileId to be merged:" + fileId);
@@ -189,7 +189,7 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload> extends HoodieWrit
   }
 
   /**
-   * Load the new incoming records in a map and return partitionPath
+   * Load the new incoming records in a map and return partitionPath.
    */
   private String init(String fileId, Iterator<HoodieRecord<T>> newRecordsItr) {
     try {
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieRangeInfoHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieRangeInfoHandle.java
index ea1d84e..3220766 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieRangeInfoHandle.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieRangeInfoHandle.java
@@ -28,7 +28,7 @@ import org.apache.hudi.table.HoodieTable;
 import org.apache.hadoop.fs.Path;
 
 /**
- * Extract range information for a given file slice
+ * Extract range information for a given file slice.
  */
 public class HoodieRangeInfoHandle<T extends HoodieRecordPayload> extends HoodieReadHandle<T> {
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java
index 95b994a..c7e6f9a 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java
@@ -90,7 +90,7 @@ public abstract class HoodieWriteHandle<T extends HoodieRecordPayload> extends H
   }
 
   /**
-   * Creates an empty marker file corresponding to storage writer path
+   * Creates an empty marker file corresponding to storage writer path.
    *
    * @param partitionPath Partition path
    */
@@ -105,7 +105,7 @@ public abstract class HoodieWriteHandle<T extends HoodieRecordPayload> extends H
   }
 
   /**
-   * THe marker path will be <base-path>/.hoodie/.temp/<instant_ts>/2019/04/25/filename
+   * THe marker path will be <base-path>/.hoodie/.temp/<instant_ts>/2019/04/25/filename.
    */
   private Path makeNewMarkerPath(String partitionPath) {
     Path markerRootPath = new Path(hoodieTable.getMetaClient().getMarkerFolderPath(instantTime));
@@ -123,7 +123,7 @@ public abstract class HoodieWriteHandle<T extends HoodieRecordPayload> extends H
   }
 
   /**
-   * Determines whether we can accept the incoming records, into the current file, depending on
+   * Determines whether we can accept the incoming records, into the current file. Depending on
    * <p>
    * - Whether it belongs to the same partitionPath as existing records - Whether the current file written bytes lt max
    * file size
@@ -154,7 +154,7 @@ public abstract class HoodieWriteHandle<T extends HoodieRecordPayload> extends H
   }
 
   /**
-   * Rewrite the GenericRecord with the Schema containing the Hoodie Metadata fields
+   * Rewrite the GenericRecord with the Schema containing the Hoodie Metadata fields.
    */
   protected GenericRecord rewriteRecord(GenericRecord record) {
     return HoodieAvroUtils.rewriteRecord(record, writerSchema);
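Aside for readers skimming the hunks above: the HoodieWriteHandle Javadoc documents the marker path layout. Purely as an illustration (this is not the real Hudi code, and the data file name below is hypothetical), a path of that documented shape could be composed like this:

    import org.apache.hadoop.fs.Path;

    // Sketch only: mirrors the documented layout
    // <base-path>/.hoodie/.temp/<instant_ts>/<partition>/<filename>;
    // in Hudi itself the root comes from HoodieTableMetaClient#getMarkerFolderPath.
    public class MarkerPathSketch {

      static Path markerPath(String basePath, String instantTime, String partitionPath, String fileName) {
        Path markerRoot = new Path(basePath, ".hoodie/.temp/" + instantTime);
        return new Path(new Path(markerRoot, partitionPath), fileName);
      }

      public static void main(String[] args) {
        // Hypothetical names, matching the example date used in the Javadoc above.
        System.out.println(markerPath("/data/trips", "20190425120000", "2019/04/25", "some-file.marker"));
        // -> /data/trips/.hoodie/.temp/20190425120000/2019/04/25/some-file.marker
      }
    }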
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/HoodieCompactor.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/HoodieCompactor.java
index b62f8b4..afd7691 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/compact/HoodieCompactor.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/HoodieCompactor.java
@@ -32,12 +32,12 @@ import java.io.Serializable;
 import java.util.Set;
 
 /**
- * A HoodieCompactor runs compaction on a hoodie table
+ * A HoodieCompactor runs compaction on a hoodie table.
  */
 public interface HoodieCompactor extends Serializable {
 
   /**
-   * Generate a new compaction plan for scheduling
+   * Generate a new compaction plan for scheduling.
    *
    * @param jsc Spark Context
    * @param hoodieTable Hoodie Table
@@ -51,7 +51,7 @@ public interface HoodieCompactor extends Serializable {
       String compactionCommitTime, Set<HoodieFileGroupId> fgIdsInPendingCompactions) throws IOException;
 
   /**
-   * Execute compaction operations and report back status
+   * Execute compaction operations and report back status.
    */
   JavaRDD<WriteStatus> compact(JavaSparkContext jsc, HoodieCompactionPlan compactionPlan, HoodieTable hoodieTable,
       HoodieWriteConfig config, String compactionInstantTime) throws IOException;
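Since the HoodieCompactor hunk above shows the full compact(...) signature, a minimal no-op implementation may help orient readers. The leading parameters of generateCompactionPlan(...) are only partly visible in the hunk, so they are assumed here, and the Hudi-type imports are elided (they match those of HoodieCompactor.java above). A sketch, not Hudi's real compactor:

    // Sketch: schedules an empty plan and executes no operations.
    public class NoOpCompactor implements HoodieCompactor {

      @Override
      public HoodieCompactionPlan generateCompactionPlan(JavaSparkContext jsc, HoodieTable hoodieTable,
          HoodieWriteConfig config, String compactionCommitTime, Set<HoodieFileGroupId> fgIdsInPendingCompactions)
          throws IOException {
        // Leading parameters assumed; the hunk only shows the trailing ones.
        return new HoodieCompactionPlan(); // empty Avro plan: nothing to compact
      }

      @Override
      public JavaRDD<WriteStatus> compact(JavaSparkContext jsc, HoodieCompactionPlan compactionPlan,
          HoodieTable hoodieTable, HoodieWriteConfig config, String compactionInstantTime) throws IOException {
        // Report back an empty status RDD.
        return jsc.emptyRDD();
      }
    }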
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedIOCompactionStrategy.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedIOCompactionStrategy.java
index 0d51df5..c84df1b 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedIOCompactionStrategy.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedIOCompactionStrategy.java
@@ -28,7 +28,7 @@ import java.util.List;
 
 /**
  * CompactionStrategy which looks at total IO to be done for the compaction (read + write) and limits the list of
- * compactions to be under a configured limit on the IO
+ * compactions to be under a configured limit on the IO.
  *
  * @see CompactionStrategy
  */
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/CompactionStrategy.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/CompactionStrategy.java
index bdd2497..79a14b6 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/CompactionStrategy.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/CompactionStrategy.java
@@ -116,7 +116,7 @@ public abstract class CompactionStrategy implements Serializable {
   }
 
   /**
-   * Filter the partition paths based on compaction strategy
+   * Filter the partition paths based on compaction strategy.
    * 
    * @param writeConfig
    * @param allPartitionPaths
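The filterPartitionPaths hook documented above is the natural extension point for restricting which partitions are considered for compaction at all. A hedged sketch follows; the exact method signature is inferred from the @param names in the hunk, the "2019/04" prefix is a made-up example, and extending LogFileSizeBasedCompactionStrategy is only done to reuse its ordering of operations:

    import java.util.List;
    import java.util.stream.Collectors;

    // Sketch: compact only partitions under a hypothetical "2019/04" prefix.
    public class AprilOnlyCompactionStrategy extends LogFileSizeBasedCompactionStrategy {

      @Override
      public List<String> filterPartitionPaths(HoodieWriteConfig writeConfig, List<String> allPartitionPaths) {
        return allPartitionPaths.stream()
            .filter(partitionPath -> partitionPath.startsWith("2019/04"))
            .collect(Collectors.toList());
      }
    }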
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/LogFileSizeBasedCompactionStrategy.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/LogFileSizeBasedCompactionStrategy.java
index b7b3863..4c9ee5f 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/LogFileSizeBasedCompactionStrategy.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/LogFileSizeBasedCompactionStrategy.java
@@ -32,7 +32,7 @@ import java.util.stream.Collectors;
 
 /**
  * LogFileSizeBasedCompactionStrategy orders the compactions based on the total log files size and limits the
- * compactions within a configured IO bound
+ * compactions within a configured IO bound.
  *
  * @see BoundedIOCompactionStrategy
  * @see CompactionStrategy
diff --git a/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsReporter.java b/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsReporter.java
index 45ce852..de52f35 100644
--- a/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsReporter.java
+++ b/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsReporter.java
@@ -26,12 +26,12 @@ import java.io.Closeable;
 public abstract class MetricsReporter {
 
   /**
-   * Push out metrics at scheduled intervals
+   * Push out metrics at scheduled intervals.
    */
   public abstract void start();
 
   /**
-   * Deterministically push out metrics
+   * Deterministically push out metrics.
    */
   public abstract void report();
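As a rough illustration of the two hooks documented above (and only those two; any further abstract members of MetricsReporter would need stubs as well), a console-printing reporter might look like the sketch below. It is not one of Hudi's shipped reporters, which push to real metrics backends on a schedule:

    // Sketch: prints a line instead of pushing to a metrics backend.
    public class ConsoleMetricsReporter extends MetricsReporter {

      @Override
      public void start() {
        // A real reporter would start a scheduler here and call report() at intervals;
        // this sketch just reports once.
        report();
      }

      @Override
      public void report() {
        System.out.println("metrics snapshot at " + System.currentTimeMillis());
      }
    }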
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
index b98c5d2..b6c8b6f 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
@@ -84,7 +84,7 @@ import java.util.stream.Collectors;
 import scala.Tuple2;
 
 /**
- * Implementation of a very heavily read-optimized Hoodie Table where
+ * Implementation of a very heavily read-optimized Hoodie Table where.
  * <p>
  * INSERTS - Produce new files, block aligned to desired size (or) Merge with the smallest existing file, to expand it
  * <p>
@@ -273,7 +273,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
   }
 
   /**
-   * Generates List of files to be cleaned
+   * Generates List of files to be cleaned.
    * 
    * @param jsc JavaSparkContext
    * @return Cleaner Plan
@@ -389,7 +389,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
   }
 
   /**
-   * Delete Inflight instant if enabled
+   * Delete Inflight instant if enabled.
    *
    * @param deleteInstant Enable Deletion of Inflight instant
    * @param activeTimeline Hoodie active timeline
@@ -414,7 +414,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
   }
 
   /**
-   * Consumer that dequeues records from queue and sends to Merge Handle
+   * Consumer that dequeues records from queue and sends to Merge Handle.
    */
   private static class UpdateHandler extends BoundedInMemoryQueueConsumer<GenericRecord, Void> {
 
@@ -474,7 +474,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
   }
 
   /**
-   * Helper class for a small file's location and its actual size on disk
+   * Helper class for a small file's location and its actual size on disk.
    */
   static class SmallFile implements Serializable {
 
@@ -493,7 +493,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
 
   /**
    * Helper class for an insert bucket along with the weight [0.0, 0.1] that defines the amount of incoming inserts that
-   * should be allocated to the bucket
+   * should be allocated to the bucket.
    */
   class InsertBucket implements Serializable {
 
@@ -512,7 +512,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
   }
 
   /**
-   * Helper class for a bucket's type (INSERT and UPDATE) and its file location
+   * Helper class for a bucket's type (INSERT and UPDATE) and its file location.
    */
   class BucketInfo implements Serializable {
 
@@ -530,16 +530,16 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
   }
 
   /**
-   * Packs incoming records to be upserted, into buckets (1 bucket = 1 RDD partition)
+   * Packs incoming records to be upserted, into buckets (1 bucket = 1 RDD partition).
    */
   class UpsertPartitioner extends Partitioner {
 
     /**
-     * List of all small files to be corrected
+     * List of all small files to be corrected.
      */
     List<SmallFile> smallFiles = new ArrayList<SmallFile>();
     /**
-     * Total number of RDD partitions, is determined by total buckets we want to pack the incoming workload into
+     * Total number of RDD partitions, is determined by total buckets we want to pack the incoming workload into.
      */
     private int totalBuckets = 0;
     /**
@@ -560,7 +560,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
     private HashMap<Integer, BucketInfo> bucketInfoMap;
 
     /**
-     * Rolling stats for files
+     * Rolling stats for files.
      */
     protected HoodieRollingStatMetadata rollingStatMetadata;
     protected long averageRecordSize;
@@ -672,7 +672,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
     }
 
     /**
-     * Returns a list of small files in the given partition path
+     * Returns a list of small files in the given partition path.
      */
     protected List<SmallFile> getSmallFiles(String partitionPath) {
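The UpsertPartitioner Javadoc above centers on one idea: one bucket maps to one RDD partition. The real class also weighs small files, average record size and rolling stats; none of that is reproduced in the toy Spark partitioner below, which only illustrates the bucket-to-partition mapping:

    import org.apache.spark.Partitioner;

    // Toy illustration of "1 bucket = 1 RDD partition"; the real UpsertPartitioner
    // routes UPDATEs to the bucket owning the record's file and spreads INSERTs by
    // bucket weight instead of hashing.
    public class FixedBucketPartitioner extends Partitioner {

      private final int totalBuckets;

      public FixedBucketPartitioner(int totalBuckets) {
        this.totalBuckets = totalBuckets;
      }

      @Override
      public int numPartitions() {
        return totalBuckets;
      }

      @Override
      public int getPartition(Object key) {
        return Math.floorMod(key.hashCode(), totalBuckets);
      }
    }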
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
index e488e9f..da29d98 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
@@ -201,7 +201,7 @@ public class HoodieMergeOnReadTable<T extends HoodieRecordPayload> extends Hoodi
 
   /**
    * Generate all rollback requests that we need to perform for rolling back this action without actually performing
-   * rolling back
+   * rolling back.
    * 
    * @param jsc JavaSparkContext
    * @param instantToRollback Instant to Rollback
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieTable.java
index 1ea174d..b7f485d 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieTable.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieTable.java
@@ -69,7 +69,7 @@ import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 /**
- * Abstract implementation of a HoodieTable
+ * Abstract implementation of a HoodieTable.
  */
 public abstract class HoodieTable<T extends HoodieRecordPayload> implements Serializable {
 
@@ -111,17 +111,17 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
   }
 
   /**
-   * Provides a partitioner to perform the upsert operation, based on the workload profile
+   * Provides a partitioner to perform the upsert operation, based on the workload profile.
    */
   public abstract Partitioner getUpsertPartitioner(WorkloadProfile profile);
 
   /**
-   * Provides a partitioner to perform the insert operation, based on the workload profile
+   * Provides a partitioner to perform the insert operation, based on the workload profile.
    */
   public abstract Partitioner getInsertPartitioner(WorkloadProfile profile);
 
   /**
-   * Return whether this HoodieTable implementation can benefit from workload profiling
+   * Return whether this HoodieTable implementation can benefit from workload profiling.
    */
   public abstract boolean isWorkloadProfileNeeded();
 
@@ -138,84 +138,84 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
   }
 
   /**
-   * Get the view of the file system for this table
+   * Get the view of the file system for this table.
    */
   public TableFileSystemView getFileSystemView() {
     return new HoodieTableFileSystemView(metaClient, getCompletedCommitsTimeline());
   }
 
   /**
-   * Get the read optimized view of the file system for this table
+   * Get the read optimized view of the file system for this table.
    */
   public TableFileSystemView.ReadOptimizedView getROFileSystemView() {
     return getViewManager().getFileSystemView(metaClient.getBasePath());
   }
 
   /**
-   * Get the real time view of the file system for this table
+   * Get the real time view of the file system for this table.
    */
   public TableFileSystemView.RealtimeView getRTFileSystemView() {
     return getViewManager().getFileSystemView(metaClient.getBasePath());
   }
 
   /**
-   * Get complete view of the file system for this table with ability to force sync
+   * Get complete view of the file system for this table with ability to force sync.
    */
   public SyncableFileSystemView getHoodieView() {
     return getViewManager().getFileSystemView(metaClient.getBasePath());
   }
 
   /**
-   * Get only the completed (no-inflights) commit + deltacommit timeline
+   * Get only the completed (no-inflights) commit + deltacommit timeline.
    */
   public HoodieTimeline getCompletedCommitsTimeline() {
     return metaClient.getCommitsTimeline().filterCompletedInstants();
   }
 
   /**
-   * Get only the completed (no-inflights) commit timeline
+   * Get only the completed (no-inflights) commit timeline.
    */
   public HoodieTimeline getCompletedCommitTimeline() {
     return metaClient.getCommitTimeline().filterCompletedInstants();
   }
 
   /**
-   * Get only the inflights (no-completed) commit timeline
+   * Get only the inflights (no-completed) commit timeline.
    */
   public HoodieTimeline getInflightCommitTimeline() {
     return metaClient.getCommitsTimeline().filterInflightsExcludingCompaction();
   }
 
   /**
-   * Get only the completed (no-inflights) clean timeline
+   * Get only the completed (no-inflights) clean timeline.
    */
   public HoodieTimeline getCompletedCleanTimeline() {
     return getActiveTimeline().getCleanerTimeline().filterCompletedInstants();
   }
 
   /**
-   * Get clean timeline
+   * Get clean timeline.
    */
   public HoodieTimeline getCleanTimeline() {
     return getActiveTimeline().getCleanerTimeline();
   }
 
   /**
-   * Get only the completed (no-inflights) savepoint timeline
+   * Get only the completed (no-inflights) savepoint timeline.
    */
   public HoodieTimeline getCompletedSavepointTimeline() {
     return getActiveTimeline().getSavePointTimeline().filterCompletedInstants();
   }
 
   /**
-   * Get the list of savepoints in this table
+   * Get the list of savepoints in this table.
    */
   public List<String> getSavepoints() {
     return getCompletedSavepointTimeline().getInstants().map(HoodieInstant::getTimestamp).collect(Collectors.toList());
   }
 
   /**
-   * Get the list of data file names savepointed
+   * Get the list of data file names savepointed.
    */
   public Stream<String> getSavepointedDataFiles(String savepointTime) {
     if (!getSavepoints().contains(savepointTime)) {
@@ -237,26 +237,26 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
   }
 
   /**
-   * Return the index
+   * Return the index.
    */
   public HoodieIndex<T> getIndex() {
     return index;
   }
 
   /**
-   * Perform the ultimate IO for a given upserted (RDD) partition
+   * Perform the ultimate IO for a given upserted (RDD) partition.
    */
   public abstract Iterator<List<WriteStatus>> handleUpsertPartition(String commitTime, Integer partition,
       Iterator<HoodieRecord<T>> recordIterator, Partitioner partitioner);
 
   /**
-   * Perform the ultimate IO for a given inserted (RDD) partition
+   * Perform the ultimate IO for a given inserted (RDD) partition.
    */
   public abstract Iterator<List<WriteStatus>> handleInsertPartition(String commitTime, Integer partition,
       Iterator<HoodieRecord<T>> recordIterator, Partitioner partitioner);
 
   /**
-   * Schedule compaction for the instant time
+   * Schedule compaction for the instant time.
    * 
    * @param jsc Spark Context
    * @param instantTime Instant Time for scheduling compaction
@@ -265,7 +265,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
   public abstract HoodieCompactionPlan scheduleCompaction(JavaSparkContext jsc, String instantTime);
 
   /**
-   * Run Compaction on the table. Compaction arranges the data so that it is optimized for data access
+   * Run Compaction on the table. Compaction arranges the data so that it is optimized for data access.
    *
    * @param jsc Spark Context
    * @param compactionInstantTime Instant Time
@@ -275,7 +275,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
       HoodieCompactionPlan compactionPlan);
 
   /**
-   * Generates list of files that are eligible for cleaning
+   * Generates list of files that are eligible for cleaning.
    * 
    * @param jsc Java Spark Context
    * @return Cleaner Plan containing list of files to be deleted.
@@ -283,7 +283,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
   public abstract HoodieCleanerPlan scheduleClean(JavaSparkContext jsc);
 
   /**
-   * Cleans the files listed in the cleaner plan associated with clean instant
+   * Cleans the files listed in the cleaner plan associated with clean instant.
    * 
    * @param jsc Java Spark Context
    * @param cleanInstant Clean Instant
@@ -300,7 +300,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
       throws IOException;
 
   /**
-   * Finalize the written data onto storage. Perform any final cleanups
+   * Finalize the written data onto storage. Perform any final cleanups.
    *
    * @param jsc Spark Context
    * @param stats List of HoodieWriteStats
@@ -312,7 +312,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
   }
 
   /**
-   * Delete Marker directory corresponding to an instant
+   * Delete Marker directory corresponding to an instant.
    * 
    * @param instantTs Instant Time
    */
@@ -409,7 +409,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
   }
 
   /**
-   * Ensures all files passed either appear or disappear
+   * Ensures all files passed either appear or disappear.
    * 
    * @param jsc JavaSparkContext
    * @param groupByPartition Files grouped by partition
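Most of the HoodieTable hunks above are accessors; a short usage sketch ties a few of them together. Imports of the Hudi types are elided, how the table instance is obtained is left to the caller, and countInstants() is assumed to exist on the returned timeline:

    // Sketch: summarize a table using accessors whose bodies appear in the hunks above.
    static void printTimelineSummary(HoodieTable<?> table) {
      System.out.println("completed commits: "
          + table.getCompletedCommitsTimeline().countInstants());

      List<String> savepoints = table.getSavepoints();
      System.out.println("savepoints: " + savepoints);

      if (!savepoints.isEmpty()) {
        // getSavepointedDataFiles returns a Stream<String> per the hunk above.
        table.getSavepointedDataFiles(savepoints.get(0))
            .forEach(file -> System.out.println("savepointed file: " + file));
      }
    }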
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/RollbackExecutor.java b/hudi-client/src/main/java/org/apache/hudi/table/RollbackExecutor.java
index 8c8455a..7468156 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/RollbackExecutor.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/RollbackExecutor.java
@@ -51,7 +51,7 @@ import java.util.Map;
 import scala.Tuple2;
 
 /**
- * Performs Rollback of Hoodie Tables
+ * Performs Rollback of Hoodie Tables.
  */
 public class RollbackExecutor implements Serializable {
 
@@ -143,7 +143,7 @@ public class RollbackExecutor implements Serializable {
   }
 
   /**
-   * Helper to merge 2 rollback-stats for a given partition
+   * Helper to merge 2 rollback-stats for a given partition.
    *
    * @param stat1 HoodieRollbackStat
    * @param stat2 HoodieRollbackStat
@@ -177,7 +177,7 @@ public class RollbackExecutor implements Serializable {
   }
 
   /**
-   * Common method used for cleaning out parquet files under a partition path during rollback of a set of commits
+   * Common method used for cleaning out parquet files under a partition path during rollback of a set of commits.
    */
   private Map<FileStatus, Boolean> deleteCleanedFiles(HoodieTableMetaClient metaClient, HoodieWriteConfig config,
       Map<FileStatus, Boolean> results, String partitionPath, PathFilter filter) throws IOException {
@@ -193,7 +193,7 @@ public class RollbackExecutor implements Serializable {
   }
 
   /**
-   * Common method used for cleaning out parquet files under a partition path during rollback of a set of commits
+   * Common method used for cleaning out parquet files under a partition path during rollback of a set of commits.
    */
   private Map<FileStatus, Boolean> deleteCleanedFiles(HoodieTableMetaClient metaClient, HoodieWriteConfig config,
       Map<FileStatus, Boolean> results, String commit, String partitionPath) throws IOException {
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/RollbackRequest.java b/hudi-client/src/main/java/org/apache/hudi/table/RollbackRequest.java
index 326f347..57db418 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/RollbackRequest.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/RollbackRequest.java
@@ -22,39 +22,39 @@ import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.util.Option;
 
 /**
- * Request for performing one rollback action
+ * Request for performing one rollback action.
  */
 public class RollbackRequest {
 
   /**
-   * Rollback Action Types
+   * Rollback Action Types.
    */
   public enum RollbackAction {
     DELETE_DATA_FILES_ONLY, DELETE_DATA_AND_LOG_FILES, APPEND_ROLLBACK_BLOCK
   }
 
   /**
-   * Partition path that needs to be rolled-back
+   * Partition path that needs to be rolled-back.
    */
   private final String partitionPath;
 
   /**
-   * Rollback Instant
+   * Rollback Instant.
    */
   private final HoodieInstant rollbackInstant;
 
   /**
-   * FileId in case of appending rollback block
+   * FileId in case of appending rollback block.
    */
   private final Option<String> fileId;
 
   /**
-   * Latest base instant needed for appending rollback block instant
+   * Latest base instant needed for appending rollback block instant.
    */
   private final Option<String> latestBaseInstant;
 
   /**
-   * Rollback Action
+   * Rollback Action.
    */
   private final RollbackAction rollbackAction;
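The RollbackAction values above map to three distinct behaviors in the rollback path. As an orientation aid only (the descriptions below are inferred from the enum names and Javadoc, not lifted from RollbackExecutor), a dispatch over them might read:

    // Sketch: human-readable description per RollbackAction; inferred from the
    // names above, not from the actual rollback implementation.
    static String describe(RollbackRequest.RollbackAction action) {
      switch (action) {
        case DELETE_DATA_FILES_ONLY:
          return "delete only the data files written by the instant";
        case DELETE_DATA_AND_LOG_FILES:
          return "delete both data and log files of the affected file slices";
        case APPEND_ROLLBACK_BLOCK:
          return "append a rollback command block to the latest log file";
        default:
          return "unknown rollback action";
      }
    }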
 
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/WorkloadProfile.java b/hudi-client/src/main/java/org/apache/hudi/table/WorkloadProfile.java
index 31d2e1d..029276a 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/WorkloadProfile.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/WorkloadProfile.java
@@ -33,19 +33,19 @@ import java.util.Set;
 import scala.Tuple2;
 
 /**
- * Information about incoming records for upsert/insert obtained either via sampling or introspecting the data fully
+ * Information about incoming records for upsert/insert obtained either via sampling or introspecting the data fully.
  * <p>
  * TODO(vc): Think about obtaining this directly from index.tagLocation
  */
 public class WorkloadProfile<T extends HoodieRecordPayload> implements Serializable {
 
   /**
-   * Input workload
+   * Input workload.
    */
   private final JavaRDD<HoodieRecord<T>> taggedRecords;
 
   /**
-   * Computed workload profile
+   * Computed workload profile.
    */
   private final HashMap<String, WorkloadStat> partitionPathStatMap;
 
diff --git a/hudi-client/src/test/java/org/apache/hudi/TestAsyncCompaction.java b/hudi-client/src/test/java/org/apache/hudi/TestAsyncCompaction.java
index 5a91ca1..451a2b1 100644
--- a/hudi-client/src/test/java/org/apache/hudi/TestAsyncCompaction.java
+++ b/hudi-client/src/test/java/org/apache/hudi/TestAsyncCompaction.java
@@ -65,7 +65,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 /**
- * Test Cases for Async Compaction and Ingestion interaction
+ * Test Cases for Async Compaction and Ingestion interaction.
  */
 public class TestAsyncCompaction extends TestHoodieClientBase {
 
@@ -400,7 +400,7 @@ public class TestAsyncCompaction extends TestHoodieClientBase {
   }
 
   /**
-   * HELPER METHODS FOR TESTING
+   * HELPER METHODS FOR TESTING.
    **/
 
   private void validateDeltaCommit(String latestDeltaCommit,
diff --git a/hudi-client/src/test/java/org/apache/hudi/TestCleaner.java b/hudi-client/src/test/java/org/apache/hudi/TestCleaner.java
index 200575a..57d5fc0 100644
--- a/hudi-client/src/test/java/org/apache/hudi/TestCleaner.java
+++ b/hudi-client/src/test/java/org/apache/hudi/TestCleaner.java
@@ -87,7 +87,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 /**
- * Test Cleaning related logic
+ * Test Cleaning related logic.
  */
 public class TestCleaner extends TestHoodieClientBase {
 
@@ -95,7 +95,7 @@ public class TestCleaner extends TestHoodieClientBase {
   private static Logger logger = LogManager.getLogger(TestHoodieClientBase.class);
 
   /**
-   * Helper method to do first batch of insert for clean by versions/commits tests
+   * Helper method to do first batch of insert for clean by versions/commits tests.
    *
    * @param cfg Hoodie Write Config
    * @param client Hoodie Client
@@ -140,7 +140,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Clean-By-Versions using insert/upsert API
+   * Test Clean-By-Versions using insert/upsert API.
    */
   @Test
   public void testInsertAndCleanByVersions() throws Exception {
@@ -148,7 +148,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Clean-By-Versions using prepped versions of insert/upsert API
+   * Test Clean-By-Versions using prepped versions of insert/upsert API.
    */
   @Test
   public void testInsertPreppedAndCleanByVersions() throws Exception {
@@ -157,7 +157,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Clean-By-Versions using bulk-insert/upsert API
+   * Test Clean-By-Versions using bulk-insert/upsert API.
    */
   @Test
   public void testBulkInsertAndCleanByVersions() throws Exception {
@@ -165,7 +165,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Clean-By-Versions using prepped versions of bulk-insert/upsert API
+   * Test Clean-By-Versions using prepped versions of bulk-insert/upsert API.
    */
   @Test
   public void testBulkInsertPreppedAndCleanByVersions() throws Exception {
@@ -175,7 +175,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Helper for Cleaning by versions logic from HoodieWriteClient API perspective
+   * Test Helper for Cleaning by versions logic from HoodieWriteClient API perspective.
    *
    * @param insertFn Insert API to be tested
    * @param upsertFn Upsert API to be tested
@@ -301,7 +301,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Clean-By-Versions using insert/upsert API
+   * Test Clean-By-Commits using insert/upsert API.
    */
   @Test
   public void testInsertAndCleanByCommits() throws Exception {
@@ -309,7 +309,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Clean-By-Versions using prepped version of insert/upsert API
+   * Test Clean-By-Commits using prepped version of insert/upsert API.
    */
   @Test
   public void testInsertPreppedAndCleanByCommits() throws Exception {
@@ -317,7 +317,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Clean-By-Versions using prepped versions of bulk-insert/upsert API
+   * Test Clean-By-Commits using prepped versions of bulk-insert/upsert API.
    */
   @Test
   public void testBulkInsertPreppedAndCleanByCommits() throws Exception {
@@ -327,7 +327,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Clean-By-Versions using bulk-insert/upsert API
+   * Test Clean-By-Commits using bulk-insert/upsert API.
    */
   @Test
   public void testBulkInsertAndCleanByCommits() throws Exception {
@@ -335,7 +335,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Helper for Cleaning by versions logic from HoodieWriteClient API perspective
+   * Test Helper for Cleaning by versions logic from HoodieWriteClient API perspective.
    *
    * @param insertFn Insert API to be tested
    * @param upsertFn Upsert API to be tested
@@ -407,7 +407,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Helper to run cleaner and collect Clean Stats
+   * Helper to run cleaner and collect Clean Stats.
    *
    * @param config HoodieWriteConfig
    */
@@ -416,7 +416,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Helper to run cleaner and collect Clean Stats
+   * Helper to run cleaner and collect Clean Stats.
    *
    * @param config HoodieWriteConfig
    */
@@ -463,7 +463,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test HoodieTable.clean() Cleaning by versions logic
+   * Test HoodieTable.clean() Cleaning by versions logic.
    */
   @Test
   public void testKeepLatestFileVersions() throws IOException {
@@ -555,7 +555,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test HoodieTable.clean() Cleaning by versions logic for MOR table with Log files
+   * Test HoodieTable.clean() Cleaning by versions logic for MOR table with Log files.
    */
   @Test
   public void testKeepLatestFileVersionsMOR() throws IOException {
@@ -701,7 +701,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test HoodieTable.clean() Cleaning by commit logic for MOR table with Log files
+   * Test HoodieTable.clean() Cleaning by commit logic for MOR table with Log files.
    */
   @Test
   public void testKeepLatestCommits() throws IOException {
@@ -718,7 +718,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test HoodieTable.clean() Cleaning by commit logic for MOR table with Log files
+   * Test HoodieTable.clean() Cleaning by commit logic for MOR table with Log files.
    */
   @Test
   public void testKeepLatestCommitsIncrMode() throws IOException {
@@ -936,7 +936,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Test Keep Latest Commits when there are pending compactions
+   * Test Keep Latest Commits when there are pending compactions.
    */
   @Test
   public void testKeepLatestCommitsWithPendingCompactions() throws IOException {
@@ -967,7 +967,7 @@ public class TestCleaner extends TestHoodieClientBase {
 
 
   /**
-   * Test Keep Latest Versions when there are pending compactions
+   * Test Keep Latest Versions when there are pending compactions.
    */
   @Test
   public void testKeepLatestVersionsWithPendingCompactionsAndFailureRetry() throws IOException {
@@ -993,7 +993,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Common test method for validating pending compactions
+   * Common test method for validating pending compactions.
    *
    * @param config Hoodie Write Config
    * @param expNumFilesDeleted Number of files deleted
@@ -1111,7 +1111,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /**
-   * Utility method to create temporary data files
+   * Utility method to create temporary data files.
    *
    * @param commitTime Commit Timestamp
    * @param numFiles Number for files to be generated
@@ -1127,7 +1127,7 @@ public class TestCleaner extends TestHoodieClientBase {
   }
 
   /***
-   * Helper method to return temporary files count
+   * Helper method to return temporary files count.
    * 
    * @return Number of temporary files found
    * @throws IOException in case of error
diff --git a/hudi-client/src/test/java/org/apache/hudi/TestClientRollback.java b/hudi-client/src/test/java/org/apache/hudi/TestClientRollback.java
index 28d2351..45cfbd2 100644
--- a/hudi-client/src/test/java/org/apache/hudi/TestClientRollback.java
+++ b/hudi-client/src/test/java/org/apache/hudi/TestClientRollback.java
@@ -47,12 +47,12 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
- * Test Cases for rollback of snapshots and commits
+ * Test Cases for rollback of snapshots and commits.
  */
 public class TestClientRollback extends TestHoodieClientBase {
 
   /**
-   * Test case for rollback-savepoint interaction
+   * Test case for rollback-savepoint interaction.
    */
   @Test
   public void testSavepointAndRollback() throws Exception {
@@ -165,7 +165,7 @@ public class TestClientRollback extends TestHoodieClientBase {
   }
 
   /**
-   * Test Cases for effects of rollbacking completed/inflight commits
+   * Test Cases for effects of rolling back completed/inflight commits.
    */
   @Test
   public void testRollbackCommit() throws Exception {
@@ -255,7 +255,7 @@ public class TestClientRollback extends TestHoodieClientBase {
   }
 
   /**
-   * Test auto-rollback of commits which are in flight
+   * Test auto-rollback of commits which are in flight.
    */
   @Test
   public void testAutoRollbackInflightCommit() throws Exception {
diff --git a/hudi-client/src/test/java/org/apache/hudi/TestCompactionAdminClient.java b/hudi-client/src/test/java/org/apache/hudi/TestCompactionAdminClient.java
index 7d23ecd..1ddc0fd 100644
--- a/hudi-client/src/test/java/org/apache/hudi/TestCompactionAdminClient.java
+++ b/hudi-client/src/test/java/org/apache/hudi/TestCompactionAdminClient.java
@@ -163,7 +163,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
   }
 
   /**
-   * Enssure compaction plan is valid
+   * Ensure compaction plan is valid.
    *
    * @param compactionInstant Compaction Instant
    */
@@ -206,7 +206,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
   }
 
   /**
-   * Validate Unschedule operations
+   * Validate Unschedule operations.
    */
   private List<Pair<HoodieLogFile, HoodieLogFile>> validateUnSchedulePlan(CompactionAdminClient client,
       String ingestionInstant, String compactionInstant, int numEntriesPerInstant, int expNumRenames) throws Exception {
@@ -215,7 +215,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
   }
 
   /**
-   * Validate Unschedule operations
+   * Validate Unschedule operations.
    */
   private List<Pair<HoodieLogFile, HoodieLogFile>> validateUnSchedulePlan(CompactionAdminClient client,
       String ingestionInstant, String compactionInstant, int numEntriesPerInstant, int expNumRenames,
@@ -287,7 +287,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
   }
 
   /**
-   * Validate Unschedule operations
+   * Validate Unschedule operations.
    */
   private void validateUnScheduleFileId(CompactionAdminClient client, String ingestionInstant, String compactionInstant,
       CompactionOperation op, int expNumRenames) throws Exception {
diff --git a/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientBase.java b/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientBase.java
index 7e99c9c..d35a1e5 100644
--- a/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientBase.java
+++ b/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientBase.java
@@ -66,7 +66,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 /**
- * Base Class providing setup/cleanup and utility methods for testing Hoodie Client facing tests
+ * Base Class providing setup/cleanup and utility methods for testing Hoodie Client facing tests.
  */
 public class TestHoodieClientBase extends HoodieClientTestHarness {
 
@@ -104,7 +104,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Get Default HoodieWriteConfig for tests
+   * Get Default HoodieWriteConfig for tests.
    *
    * @return Default Hoodie Write Config for tests
    */
@@ -114,7 +114,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
 
 
   /**
-   * Get Config builder with default configs set
+   * Get Config builder with default configs set.
    *
    * @return Config Builder
    */
@@ -123,7 +123,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Get Config builder with default configs set
+   * Get Config builder with default configs set.
    *
    * @return Config Builder
    */
@@ -147,7 +147,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Assert no failures in writing hoodie files
+   * Assert no failures in writing hoodie files.
    *
    * @param statuses List of Write Status
    */
@@ -159,7 +159,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Ensure presence of partition meta-data at known depth
+   * Ensure presence of partition meta-data at known depth.
    *
    * @param partitionPaths Partition paths to check
    * @param fs File System
@@ -175,7 +175,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Ensure records have location field set
+   * Ensure records have location field set.
    *
    * @param taggedRecords Tagged Records
    * @param commitTime Commit Timestamp
@@ -189,7 +189,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Assert that there is no duplicate key at the partition level
+   * Assert that there is no duplicate key at the partition level.
    *
    * @param records List of Hoodie records
    */
@@ -252,7 +252,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Generate wrapper for record generation function for testing Prepped APIs
+   * Generate wrapper for record generation function for testing Prepped APIs.
    *
    * @param isPreppedAPI Flag to indicate if this is for testing prepped-version of APIs
    * @param writeConfig Hoodie Write Config
@@ -269,7 +269,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Generate wrapper for delete key generation function for testing Prepped APIs
+   * Generate wrapper for delete key generation function for testing Prepped APIs.
    *
    * @param isPreppedAPI Flag to indicate if this is for testing prepped-version of APIs
    * @param writeConfig Hoodie Write Config
@@ -286,7 +286,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Helper to insert first batch of records and do regular assertions on the state after successful completion
+   * Helper to insert first batch of records and do regular assertions on the state after successful completion.
    *
    * @param writeConfig Hoodie Write Config
    * @param client Hoodie Write Client
@@ -312,7 +312,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Helper to upsert batch of records and do regular assertions on the state after successful completion
+   * Helper to upsert batch of records and do regular assertions on the state after successful completion.
    *
    * @param writeConfig Hoodie Write Config
    * @param client Hoodie Write Client
@@ -344,7 +344,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Helper to delete batch of keys and do regular assertions on the state after successful completion
+   * Helper to delete batch of keys and do regular assertions on the state after successful completion.
    *
    * @param writeConfig Hoodie Write Config
    * @param client Hoodie Write Client
@@ -374,7 +374,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Helper to insert/upsert batch of records and do regular assertions on the state after successful completion
+   * Helper to insert/upsert batch of records and do regular assertions on the state after successful completion.
    *
    * @param client Hoodie Write Client
    * @param newCommitTime New Commit Timestamp to be used
@@ -445,7 +445,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Helper to delete batch of hoodie keys and do regular assertions on the state after successful completion
+   * Helper to delete batch of hoodie keys and do regular assertions on the state after successful completion.
    *
    * @param client Hoodie Write Client
    * @param newCommitTime New Commit Timestamp to be used
@@ -507,7 +507,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Get Cleaner state corresponding to a partition path
+   * Get Cleaner state corresponding to a partition path.
    *
    * @param hoodieCleanStatsTwo List of Clean Stats
    * @param partitionPath Partition path for filtering
@@ -518,7 +518,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Utility to simulate commit touching files in a partition
+   * Utility to simulate commit touching files in a partition.
    *
    * @param files List of file-Ids to be touched
    * @param partitionPath Partition
@@ -532,7 +532,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
   }
 
   /**
-   * Helper methods to create new data files in a partition
+   * Helper methods to create new data files in a partition.
    *
    * @param partitionPath Partition
    * @param commitTime Commit Timestamp
diff --git a/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientOnCopyOnWriteStorage.java b/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientOnCopyOnWriteStorage.java
index 790f1b3..7cf2da7 100644
--- a/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientOnCopyOnWriteStorage.java
+++ b/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientOnCopyOnWriteStorage.java
@@ -77,7 +77,7 @@ import static org.mockito.Mockito.when;
 public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
 
   /**
-   * Test Auto Commit behavior for HoodieWriteClient insert API
+   * Test Auto Commit behavior for HoodieWriteClient insert API.
    */
   @Test
   public void testAutoCommitOnInsert() throws Exception {
@@ -85,7 +85,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test Auto Commit behavior for HoodieWriteClient insertPrepped API
+   * Test Auto Commit behavior for HoodieWriteClient insertPrepped API.
    */
   @Test
   public void testAutoCommitOnInsertPrepped() throws Exception {
@@ -93,7 +93,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test Auto Commit behavior for HoodieWriteClient upsert API
+   * Test Auto Commit behavior for HoodieWriteClient upsert API.
    */
   @Test
   public void testAutoCommitOnUpsert() throws Exception {
@@ -101,7 +101,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test Auto Commit behavior for HoodieWriteClient upsert Prepped API
+   * Test Auto Commit behavior for HoodieWriteClient upsert Prepped API.
    */
   @Test
   public void testAutoCommitOnUpsertPrepped() throws Exception {
@@ -109,7 +109,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test Auto Commit behavior for HoodieWriteClient bulk-insert API
+   * Test Auto Commit behavior for HoodieWriteClient bulk-insert API.
    */
   @Test
   public void testAutoCommitOnBulkInsert() throws Exception {
@@ -117,7 +117,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test Auto Commit behavior for HoodieWriteClient bulk-insert prepped API
+   * Test Auto Commit behavior for HoodieWriteClient bulk-insert prepped API.
    */
   @Test
   public void testAutoCommitOnBulkInsertPrepped() throws Exception {
@@ -126,7 +126,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test auto-commit by applying write function
+   * Test auto-commit by applying write function.
    *
    * @param writeFn One of HoodieWriteClient Write API
    * @throws Exception in case of failure
@@ -152,7 +152,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test De-duplication behavior for HoodieWriteClient insert API
+   * Test De-duplication behavior for HoodieWriteClient insert API.
    */
   @Test
   public void testDeduplicationOnInsert() throws Exception {
@@ -160,7 +160,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test De-duplication behavior for HoodieWriteClient bulk-insert API
+   * Test De-duplication behavior for HoodieWriteClient bulk-insert API.
    */
   @Test
   public void testDeduplicationOnBulkInsert() throws Exception {
@@ -168,7 +168,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test De-duplication behavior for HoodieWriteClient upsert API
+   * Test De-duplication behavior for HoodieWriteClient upsert API.
    */
   @Test
   public void testDeduplicationOnUpsert() throws Exception {
@@ -176,7 +176,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test Deduplication Logic for write function
+   * Test Deduplication Logic for write function.
    *
    * @param writeFn One of HoddieWriteClient non-prepped write APIs
    * @throws Exception in case of failure
@@ -224,7 +224,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Build a test Hoodie WriteClient with dummy index to configure isGlobal flag
+   * Build a test Hoodie WriteClient with dummy index to configure isGlobal flag.
    *
    * @param isGlobal Flag to control HoodieIndex.isGlobal
    * @return Hoodie Write Client
@@ -237,7 +237,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test Upsert API
+   * Test Upsert API.
    */
   @Test
   public void testUpserts() throws Exception {
@@ -245,7 +245,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test UpsertPrepped API
+   * Test UpsertPrepped API.
    */
   @Test
   public void testUpsertsPrepped() throws Exception {
@@ -253,7 +253,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test one of HoodieWriteClient upsert(Prepped) APIs
+   * Test one of HoodieWriteClient upsert(Prepped) APIs.
    *
    * @param hoodieWriteConfig Write Config
    * @param writeFn One of Hoodie Write Function API
@@ -291,7 +291,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Tesst deletion of records
+   * Test deletion of records.
    */
   @Test
   public void testDeletes() throws Exception {
@@ -318,7 +318,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
         -1, recordGenFunction, HoodieWriteClient::upsert, true, 200, 200, 1);
 
     /**
-     * Write 2 (deletes+writes)
+     * Write 2 (deletes+writes).
      */
     String prevCommitTime = newCommitTime;
     newCommitTime = "004";
@@ -336,7 +336,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test scenario of new file-group getting added during upsert()
+   * Test scenario of new file-group getting added during upsert().
    */
   @Test
   public void testSmallInsertHandlingForUpserts() throws Exception {
@@ -448,7 +448,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test scenario of new file-group getting added during insert()
+   * Test scenario of new file-group getting added during insert().
    */
   @Test
   public void testSmallInsertHandlingForInserts() throws Exception {
@@ -530,7 +530,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test delete with delete api
+   * Test delete with delete api.
    */
   @Test
   public void testDeletesWithDeleteApi() throws Exception {
@@ -659,7 +659,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test delete with delete api
+   * Test delete with delete api.
    */
   @Test
   public void testDeletesWithoutInserts() throws Exception {
@@ -688,7 +688,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test to ensure commit metadata points to valid files
+   * Test to ensure commit metadata points to valid files.
    */
   @Test
   public void testCommitWritesRelativePaths() throws Exception {
@@ -735,7 +735,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Test to ensure commit metadata points to valid files
+   * Test to ensure commit metadata points to valid files.
    */
   @Test
   public void testRollingStatsInMetadata() throws Exception {
@@ -810,7 +810,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Tests behavior of committing only when consistency is verified
+   * Tests behavior of committing only when consistency is verified.
    */
   @Test
   public void testConsistencyCheckDuringFinalize() throws Exception {
@@ -879,14 +879,14 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
   }
 
   /**
-   * Build Hoodie Write Config for small data file sizes
+   * Build Hoodie Write Config for small data file sizes.
    */
   private HoodieWriteConfig getSmallInsertWriteConfig(int insertSplitSize) {
     return getSmallInsertWriteConfig(insertSplitSize, false);
   }
 
   /**
-   * Build Hoodie Write Config for small data file sizes
+   * Build Hoodie Write Config for small data file sizes.
    */
   private HoodieWriteConfig getSmallInsertWriteConfig(int insertSplitSize, boolean useNullSchema) {
     HoodieWriteConfig.Builder builder = getConfigBuilder(useNullSchema ? NULL_SCHEMA : TRIP_EXAMPLE_SCHEMA);
diff --git a/hudi-client/src/test/java/org/apache/hudi/TestHoodieReadClient.java b/hudi-client/src/test/java/org/apache/hudi/TestHoodieReadClient.java
index 858e5ff..03b38fb 100644
--- a/hudi-client/src/test/java/org/apache/hudi/TestHoodieReadClient.java
+++ b/hudi-client/src/test/java/org/apache/hudi/TestHoodieReadClient.java
@@ -40,7 +40,7 @@ import static org.junit.Assert.assertTrue;
 public class TestHoodieReadClient extends TestHoodieClientBase {
 
   /**
-   * Test ReadFilter API after writing new records using HoodieWriteClient.insert
+   * Test ReadFilter API after writing new records using HoodieWriteClient.insert.
    */
   @Test
   public void testReadFilterExistAfterInsert() throws Exception {
@@ -48,7 +48,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
   }
 
   /**
-   * Test ReadFilter API after writing new records using HoodieWriteClient.insertPrepped
+   * Test ReadFilter API after writing new records using HoodieWriteClient.insertPrepped.
    */
   @Test
   public void testReadFilterExistAfterInsertPrepped() throws Exception {
@@ -56,7 +56,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
   }
 
   /**
-   * Test ReadFilter API after writing new records using HoodieWriteClient.bulkInsert
+   * Test ReadFilter API after writing new records using HoodieWriteClient.bulkInsert.
    */
   @Test
   public void testReadFilterExistAfterBulkInsert() throws Exception {
@@ -64,7 +64,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
   }
 
   /**
-   * Test ReadFilter API after writing new records using HoodieWriteClient.bulkInsertPrepped
+   * Test ReadFilter API after writing new records using HoodieWriteClient.bulkInsertPrepped.
    */
   @Test
   public void testReadFilterExistAfterBulkInsertPrepped() throws Exception {
@@ -76,7 +76,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
 
   /**
    * Helper to write new records using one of HoodieWriteClient's write API and use ReadClient to test filterExists()
-   * API works correctly
+   * API works correctly.
    *
    * @param config Hoodie Write Config
    * @param writeFn Write Function for writing records
@@ -111,7 +111,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
   }
 
   /**
-   * Test tagLocation API after insert()
+   * Test tagLocation API after insert().
    */
   @Test
   public void testTagLocationAfterInsert() throws Exception {
@@ -119,7 +119,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
   }
 
   /**
-   * Test tagLocation API after insertPrepped()
+   * Test tagLocation API after insertPrepped().
    */
   @Test
   public void testTagLocationAfterInsertPrepped() throws Exception {
@@ -128,7 +128,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
   }
 
   /**
-   * Test tagLocation API after bulk-insert()
+   * Test tagLocation API after bulk-insert().
    */
   @Test
   public void testTagLocationAfterBulkInsert() throws Exception {
@@ -137,7 +137,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
   }
 
   /**
-   * Test tagLocation API after bulkInsertPrepped()
+   * Test tagLocation API after bulkInsertPrepped().
    */
   @Test
   public void testTagLocationAfterBulkInsertPrepped() throws Exception {
@@ -148,7 +148,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
   }
 
   /**
-   * Helper method to test tagLocation after using different HoodieWriteClient write APIS
+   * Helper method to test tagLocation after using different HoodieWriteClient write APIs.
    *
    * @param hoodieWriteConfig Write Config
    * @param insertFn Hoodie Write Client first Insert API
diff --git a/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestUtils.java b/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestUtils.java
index a7bd7af..9c26ffc 100644
--- a/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestUtils.java
+++ b/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestUtils.java
@@ -192,7 +192,7 @@ public class HoodieClientTestUtils {
   }
 
   /**
-   * Reads the paths under the a hoodie dataset out as a DataFrame
+   * Reads the paths under a hoodie dataset out as a DataFrame.
    */
   public static Dataset<Row> read(JavaSparkContext jsc, String basePath, SQLContext sqlContext, FileSystem fs,
       String... paths) {
diff --git a/hudi-client/src/test/java/org/apache/hudi/common/HoodieMergeOnReadTestUtils.java b/hudi-client/src/test/java/org/apache/hudi/common/HoodieMergeOnReadTestUtils.java
index facc964..e13e3dc 100644
--- a/hudi-client/src/test/java/org/apache/hudi/common/HoodieMergeOnReadTestUtils.java
+++ b/hudi-client/src/test/java/org/apache/hudi/common/HoodieMergeOnReadTestUtils.java
@@ -41,7 +41,7 @@ import java.util.List;
 import java.util.stream.Collectors;
 
 /**
- * Utility methods to aid in testing MergeOnRead (workaround for HoodieReadClient for MOR)
+ * Utility methods to aid in testing MergeOnRead (workaround for HoodieReadClient for MOR).
  */
 public class HoodieMergeOnReadTestUtils {
 
diff --git a/hudi-client/src/test/java/org/apache/hudi/func/TestBoundedInMemoryQueue.java b/hudi-client/src/test/java/org/apache/hudi/func/TestBoundedInMemoryQueue.java
index 5331c15..4e87c5c 100644
--- a/hudi-client/src/test/java/org/apache/hudi/func/TestBoundedInMemoryQueue.java
+++ b/hudi-client/src/test/java/org/apache/hudi/func/TestBoundedInMemoryQueue.java
@@ -110,7 +110,7 @@ public class TestBoundedInMemoryQueue extends HoodieClientTestHarness {
   }
 
   /**
-   * Test to ensure that we are reading all records from queue iterator when we have multiple producers
+   * Test to ensure that we are reading all records from queue iterator when we have multiple producers.
    */
   @SuppressWarnings("unchecked")
   @Test(timeout = 60000)
diff --git a/hudi-client/src/test/java/org/apache/hudi/index/bloom/TestKeyRangeLookupTree.java b/hudi-client/src/test/java/org/apache/hudi/index/bloom/TestKeyRangeLookupTree.java
index 496e52b..a30eab8 100644
--- a/hudi-client/src/test/java/org/apache/hudi/index/bloom/TestKeyRangeLookupTree.java
+++ b/hudi-client/src/test/java/org/apache/hudi/index/bloom/TestKeyRangeLookupTree.java
@@ -31,7 +31,7 @@ import static junit.framework.TestCase.assertEquals;
 import static junit.framework.TestCase.assertTrue;
 
 /**
- * Tests {@link KeyRangeLookupTree}
+ * Tests {@link KeyRangeLookupTree}.
  */
 public class TestKeyRangeLookupTree {
 
@@ -59,7 +59,7 @@ public class TestKeyRangeLookupTree {
   }
 
   /**
-   * Tests for many entries in the tree with same start value and different end values
+   * Tests for many entries in the tree with same start value and different end values.
    */
   @Test
   public void testFileGroupLookUpManyEntriesWithSameStartValue() {
@@ -78,7 +78,7 @@ public class TestKeyRangeLookupTree {
   }
 
   /**
-   * Tests for many duplicte entries in the tree
+   * Tests for many duplicate entries in the tree.
    */
   @Test
   public void testFileGroupLookUpManyDulicateEntries() {
@@ -158,7 +158,7 @@ public class TestKeyRangeLookupTree {
   }
 
   /**
-   * Updates the expected matches for a given {@link KeyRangeNode}
+   * Updates the expected matches for a given {@link KeyRangeNode}.
    *
    * @param toInsert the {@link KeyRangeNode} to be inserted
    */
diff --git a/hudi-client/src/test/java/org/apache/hudi/io/TestHoodieMergeHandle.java b/hudi-client/src/test/java/org/apache/hudi/io/TestHoodieMergeHandle.java
index 6a46592..7b8d1ce 100644
--- a/hudi-client/src/test/java/org/apache/hudi/io/TestHoodieMergeHandle.java
+++ b/hudi-client/src/test/java/org/apache/hudi/io/TestHoodieMergeHandle.java
@@ -318,7 +318,7 @@ public class TestHoodieMergeHandle extends HoodieClientTestHarness {
   }
 
   /**
-   * Assert no failures in writing hoodie files
+   * Assert no failures in writing hoodie files.
    *
    * @param statuses List of Write Status
    */
@@ -340,7 +340,7 @@ public class TestHoodieMergeHandle extends HoodieClientTestHarness {
   }
 
   /**
-   * Overridden so that we can capture and inspect all success records
+   * Overridden so that we can capture and inspect all success records.
    */
   public static class TestWriteStatus extends WriteStatus {
 
diff --git a/hudi-client/src/test/java/org/apache/hudi/table/TestMergeOnReadTable.java b/hudi-client/src/test/java/org/apache/hudi/table/TestMergeOnReadTable.java
index b9ff1af..ff0fb64 100644
--- a/hudi-client/src/test/java/org/apache/hudi/table/TestMergeOnReadTable.java
+++ b/hudi-client/src/test/java/org/apache/hudi/table/TestMergeOnReadTable.java
@@ -1019,7 +1019,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
   }
 
   /**
-   * Test to ensure rolling stats are correctly written to metadata file
+   * Test to ensure rolling stats are correctly written to metadata file.
    */
   @Test
   public void testRollingStatsInMetadata() throws Exception {
@@ -1118,7 +1118,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
   }
 
   /**
-   * Test to ensure rolling stats are correctly written to the metadata file, identifies small files and corrects them
+   * Test to ensure rolling stats are correctly written to the metadata file, identifies small files and corrects them.
    */
   @Test
   public void testRollingStatsWithSmallFileHandling() throws Exception {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/EmptyHoodieRecordPayload.java b/hudi-common/src/main/java/org/apache/hudi/common/model/EmptyHoodieRecordPayload.java
index e12a29c..783422f 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/model/EmptyHoodieRecordPayload.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/model/EmptyHoodieRecordPayload.java
@@ -25,7 +25,7 @@ import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
 
 /**
- * Empty payload used for deletions
+ * Empty payload used for deletions.
  */
 public class EmptyHoodieRecordPayload implements HoodieRecordPayload<EmptyHoodieRecordPayload> {
 
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java
index 2635c3c..7b1bffb 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java
@@ -310,7 +310,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
    *       writer.getCurrentSize(); assertTrue("We just wrote a new block - size2 should be > size1", size2 > size1);
    *       assertEquals("Write should be auto-flushed. The size reported by FileStatus and the writer should match",
    *       size2, fs.getFileStatus(writer.getLogFile().getPath()).getLen()); writer.close(); }
-   **/
+   */
 
   @Test
   public void testAppendNotSupported() throws IOException, URISyntaxException, InterruptedException {
diff --git a/hudi-hadoop-mr/src/main/java/com/uber/hoodie/hadoop/HoodieInputFormat.java b/hudi-hadoop-mr/src/main/java/com/uber/hoodie/hadoop/HoodieInputFormat.java
index b22820e..49588eb 100644
--- a/hudi-hadoop-mr/src/main/java/com/uber/hoodie/hadoop/HoodieInputFormat.java
+++ b/hudi-hadoop-mr/src/main/java/com/uber/hoodie/hadoop/HoodieInputFormat.java
@@ -21,7 +21,7 @@ package com.uber.hoodie.hadoop;
 import org.apache.hudi.hadoop.HoodieParquetInputFormat;
 
 /**
- * Temporary class to allow seamless migration of com.uber.hoodie to org.apache.hudi
+ * Temporary class to allow seamless migration of com.uber.hoodie to org.apache.hudi.
  */
 public class HoodieInputFormat extends HoodieParquetInputFormat {
 
diff --git a/hudi-hadoop-mr/src/main/java/com/uber/hoodie/hadoop/realtime/HoodieRealtimeInputFormat.java b/hudi-hadoop-mr/src/main/java/com/uber/hoodie/hadoop/realtime/HoodieRealtimeInputFormat.java
index 99d3c3a..ec10e16 100644
--- a/hudi-hadoop-mr/src/main/java/com/uber/hoodie/hadoop/realtime/HoodieRealtimeInputFormat.java
+++ b/hudi-hadoop-mr/src/main/java/com/uber/hoodie/hadoop/realtime/HoodieRealtimeInputFormat.java
@@ -21,7 +21,7 @@ package com.uber.hoodie.hadoop.realtime;
 import org.apache.hudi.hadoop.realtime.HoodieParquetRealtimeInputFormat;
 
 /**
- * Temporary class to allow seamless migration of com.uber.hoodie to org.apache.hudi
+ * Temporary class to allow seamless migration of com.uber.hoodie to org.apache.hudi.
  */
 public class HoodieRealtimeInputFormat extends HoodieParquetRealtimeInputFormat {
 
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java
index 51a9805..14ca2f4 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java
@@ -54,7 +54,7 @@ public class HoodieROTablePathFilter implements PathFilter, Serializable {
 
   /**
    * Its quite common, to have all files from a given partition path be passed into accept(), cache the check for hoodie
-   * metadata for known partition paths and the latest versions of files
+   * metadata for known partition paths and the latest versions of files.
    */
   private HashMap<String, HashSet<Path>> hoodiePathCache;
 
@@ -72,7 +72,7 @@ public class HoodieROTablePathFilter implements PathFilter, Serializable {
   }
 
   /**
-   * Obtain the path, two levels from provided path
+   * Obtain the path, two levels from provided path.
    *
    * @return said path if available, null otherwise
    */
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/RecordReaderValueIterator.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/RecordReaderValueIterator.java
index 4201470..21427cc 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/RecordReaderValueIterator.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/RecordReaderValueIterator.java
@@ -29,7 +29,7 @@ import java.util.Iterator;
 import java.util.NoSuchElementException;
 
 /**
- * Provides Iterator Interface to iterate value entries read from record reader
+ * Provides Iterator Interface to iterate value entries read from record reader.
  *
  * @param <K> Key Type
  * @param <V> Value Type
@@ -42,7 +42,7 @@ public class RecordReaderValueIterator<K, V> implements Iterator<V> {
   private V nextVal = null;
 
   /**
-   * Construct RecordReaderValueIterator
+   * Construct RecordReaderValueIterator.
    *
    * @param reader reader
    */
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/UseFileSplitsFromInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/UseFileSplitsFromInputFormat.java
index f21e24e..3e1d607 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/UseFileSplitsFromInputFormat.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/UseFileSplitsFromInputFormat.java
@@ -27,7 +27,7 @@ import java.lang.annotation.Target;
 
 /**
  * When annotated on a InputFormat, informs the query engines, that they should use the FileSplits provided by the input
- * format to execute the queries
+ * format to execute the queries.
  */
 @Inherited
 @Documented
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java
index 6512d94..1bf0da1 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java
@@ -486,7 +486,7 @@ public class HoodieCombineHiveInputFormat<K extends WritableComparable, V extend
   }
 
   /**
-   * Gets all the path indices that should not be combined
+   * Gets all the path indices that should not be combined.
    */
   @VisibleForTesting
   public Set<Integer> getNonCombinablePathIndices(JobConf job, Path[] paths, int numThreads)
@@ -602,8 +602,8 @@ public class HoodieCombineHiveInputFormat<K extends WritableComparable, V extend
   }
 
   /**
-   * MOD - Just added this for visibility
-   **/
+   * MOD - Just added this for visibility.
+   */
   Path[] getInputPaths(JobConf job) throws IOException {
     Path[] dirs = FileInputFormat.getInputPaths(job);
     if (dirs.length == 0) {
@@ -786,7 +786,7 @@ public class HoodieCombineHiveInputFormat<K extends WritableComparable, V extend
   }
 
   /**
-   * This is a marker interface that is used to identify the formats where combine split generation is not applicable
+   * This is a marker interface that is used to identify the formats where combine split generation is not applicable.
    */
   public interface AvoidSplitCombination {
 
@@ -794,8 +794,8 @@ public class HoodieCombineHiveInputFormat<K extends WritableComparable, V extend
   }
 
   /**
-   * **MOD** this is the implementation of CombineFileInputFormat which is a copy of
-   * org.apache.hadoop.hive.shims.HadoopShimsSecure.CombineFileInputFormatShim with changes in listStatus
+   * **MOD** This is the implementation of CombineFileInputFormat which is a copy of
+   * org.apache.hadoop.hive.shims.HadoopShimsSecure.CombineFileInputFormatShim with changes in listStatus.
    */
   public static class HoodieCombineFileInputFormatShim<K, V> extends CombineFileInputFormat<K, V>
       implements org.apache.hadoop.hive.shims.HadoopShims.CombineFileInputFormatShim<K, V> {
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java
index 68bf517..a15ed76 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java
@@ -130,7 +130,7 @@ public abstract class AbstractRealtimeRecordReader {
   }
 
   /**
-   * Prints a JSON representation of the ArrayWritable for easier debuggability
+   * Prints a JSON representation of the ArrayWritable for easier debuggability.
    */
   protected static String arrayWritableToString(ArrayWritable writable) {
     if (writable == null) {
@@ -197,7 +197,7 @@ public abstract class AbstractRealtimeRecordReader {
   }
 
   /**
-   * Generate a reader schema off the provided writeSchema, to just project out the provided columns
+   * Generate a reader schema off the provided writeSchema, to just project out the provided columns.
    */
   public static Schema generateProjectionSchema(Schema writeSchema, Map<String, Field> schemaFieldsMap,
       List<String> fieldNames) {
@@ -234,7 +234,7 @@ public abstract class AbstractRealtimeRecordReader {
   }
 
   /**
-   * Convert the projected read from delta record into an array writable
+   * Convert the projected read from delta record into an array writable.
    */
   public static Writable avroToArrayWritable(Object value, Schema schema) {
 
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java
index 6d46728..ef03c0d 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java
@@ -60,7 +60,7 @@ import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 /**
- * Input Format, that provides a real-time view of data in a Hoodie dataset
+ * Input Format, that provides a real-time view of data in a Hoodie dataset.
  */
 @UseFileSplitsFromInputFormat
 public class HoodieParquetRealtimeInputFormat extends HoodieParquetInputFormat implements Configurable {
@@ -160,7 +160,7 @@ public class HoodieParquetRealtimeInputFormat extends HoodieParquetInputFormat i
   }
 
   /**
-   * Add a field to the existing fields projected
+   * Add a field to the existing fields projected.
    */
   private static Configuration addProjectionField(Configuration conf, String fieldName, int fieldIndex) {
     String readColNames = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "");
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetSerde.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetSerde.java
index 72195f6..0121506 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetSerde.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetSerde.java
@@ -21,7 +21,7 @@ package org.apache.hudi.hadoop.realtime;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
 
 /**
- * Simply extends ParquetHiveSerDe
+ * Simply extends ParquetHiveSerDe.
  */
 public class HoodieParquetSerde extends ParquetHiveSerDe {
 
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieRealtimeRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieRealtimeRecordReader.java
index e156316..8af2f08 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieRealtimeRecordReader.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieRealtimeRecordReader.java
@@ -52,7 +52,7 @@ public class HoodieRealtimeRecordReader implements RecordReader<NullWritable, Ar
   }
 
   /**
-   * Construct record reader based on job configuration
+   * Construct record reader based on job configuration.
    *
    * @param split File Split
    * @param jobConf Job Configuration
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/RealtimeUnmergedRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/RealtimeUnmergedRecordReader.java
index cd6f41d..f1a8eb8 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/RealtimeUnmergedRecordReader.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/RealtimeUnmergedRecordReader.java
@@ -60,7 +60,7 @@ class RealtimeUnmergedRecordReader extends AbstractRealtimeRecordReader
 
   /**
    * Construct a Unmerged record reader that parallely consumes both parquet and log records and buffers for upstream
-   * clients to consume
+   * clients to consume.
    *
    * @param split File split
    * @param job Job Configuration
diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestRecordReaderValueIterator.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestRecordReaderValueIterator.java
index 2dfd036..36799d8 100644
--- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestRecordReaderValueIterator.java
+++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestRecordReaderValueIterator.java
@@ -49,7 +49,7 @@ public class TestRecordReaderValueIterator {
   }
 
   /**
-   * Simple replay record reader for unit-testing
+   * Simple replay record reader for unit-testing.
    */
   private static class TestRecordReader implements RecordReader<IntWritable, Text> {
 
diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/HiveSyncTool.java b/hudi-hive/src/main/java/org/apache/hudi/hive/HiveSyncTool.java
index ebf1c0f..5acb3d6 100644
--- a/hudi-hive/src/main/java/org/apache/hudi/hive/HiveSyncTool.java
+++ b/hudi-hive/src/main/java/org/apache/hudi/hive/HiveSyncTool.java
@@ -160,7 +160,7 @@ public class HiveSyncTool {
 
   /**
   * Syncs the list of storage partitions passed in (checks if the partition is in hive, if not adds it or if the
-   * partition path does not match, it updates the partition path)
+   * partition path does not match, it updates the partition path).
    */
   private void syncPartitions(List<String> writtenPartitionsSince) {
     try {
diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java b/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java
index dd0a460..1dffee5 100644
--- a/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java
+++ b/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java
@@ -132,7 +132,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Add the (NEW) partitons to the table
+   * Add the (NEW) partitions to the table.
    */
   void addPartitionsToTable(List<String> partitionsToAdd) {
     if (partitionsToAdd.isEmpty()) {
@@ -145,7 +145,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Partition path has changed - update the path for te following partitions
+   * Partition path has changed - update the path for the following partitions.
    */
   void updatePartitionsToTable(List<String> changedPartitions) {
     if (changedPartitions.isEmpty()) {
@@ -172,7 +172,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Generate Hive Partition from partition values
+   * Generate Hive Partition from partition values.
    *
    * @param partition Partition path
    * @return
@@ -241,7 +241,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Scan table partitions
+   * Scan table partitions.
    */
   public List<Partition> scanTablePartitions() throws TException {
     return client.listPartitions(syncConfig.databaseName, syncConfig.tableName, (short) -1);
@@ -274,7 +274,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Get the table schema
+   * Get the table schema.
    */
   public Map<String, String> getTableSchema() {
     if (syncConfig.useJdbc) {
@@ -428,7 +428,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Read the schema from the log file on path
+   * Read the schema from the log file on path.
    */
   @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
   private MessageType readSchemaFromLogFile(Option<HoodieInstant> lastCompactionCommitOpt, Path path)
@@ -443,7 +443,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Read the parquet schema from a parquet File
+   * Read the parquet schema from a parquet File.
    */
   private MessageType readSchemaFromDataFile(Path parquetFilePath) throws IOException {
     LOG.info("Reading schema from " + parquetFilePath);
@@ -468,7 +468,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Execute a update in hive metastore with this SQL
+   * Execute an update in hive metastore with this SQL.
    *
    * @param s SQL to execute
    */
@@ -490,7 +490,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Execute a update in hive using Hive Driver
+   * Execute an update in hive using Hive Driver.
    *
    * @param sql SQL statement to execute
    */
@@ -663,7 +663,7 @@ public class HoodieHiveClient {
   }
 
   /**
-   * Partition Event captures any partition that needs to be added or updated
+   * Partition Event captures any partition that needs to be added or updated.
    */
   static class PartitionEvent {
 
diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/NonPartitionedExtractor.java b/hudi-hive/src/main/java/org/apache/hudi/hive/NonPartitionedExtractor.java
index e122bbd..dc62439 100644
--- a/hudi-hive/src/main/java/org/apache/hudi/hive/NonPartitionedExtractor.java
+++ b/hudi-hive/src/main/java/org/apache/hudi/hive/NonPartitionedExtractor.java
@@ -22,7 +22,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 /**
- * Extractor for Non-partitioned hive tables
+ * Extractor for Non-partitioned hive tables.
  */
 public class NonPartitionedExtractor implements PartitionValueExtractor {
 
diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/SchemaDifference.java b/hudi-hive/src/main/java/org/apache/hudi/hive/SchemaDifference.java
index 752c099..21152ce 100644
--- a/hudi-hive/src/main/java/org/apache/hudi/hive/SchemaDifference.java
+++ b/hudi-hive/src/main/java/org/apache/hudi/hive/SchemaDifference.java
@@ -29,7 +29,7 @@ import java.util.List;
 import java.util.Map;
 
 /**
- * Represents the schema difference between the storage schema and hive table schema
+ * Represents the schema difference between the storage schema and hive table schema.
  */
 public class SchemaDifference {
 
diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/util/SchemaUtil.java b/hudi-hive/src/main/java/org/apache/hudi/hive/util/SchemaUtil.java
index 95cc550..a67d2a8 100644
--- a/hudi-hive/src/main/java/org/apache/hudi/hive/util/SchemaUtil.java
+++ b/hudi-hive/src/main/java/org/apache/hudi/hive/util/SchemaUtil.java
@@ -49,14 +49,14 @@ import java.util.Set;
 import java.util.stream.Collectors;
 
 /**
- * Schema Utilities
+ * Schema Utilities.
  */
 public class SchemaUtil {
 
   private static final Logger LOG = LogManager.getLogger(SchemaUtil.class);
 
   /**
-   * Get the schema difference between the storage schema and hive table schema
+   * Get the schema difference between the storage schema and hive table schema.
    */
   public static SchemaDifference getSchemaDifference(MessageType storageSchema, Map<String, String> tableSchema,
       List<String> partitionKeys) {
@@ -135,7 +135,7 @@ public class SchemaUtil {
   }
 
   /**
-   * Returns equivalent Hive table schema read from a parquet file
+   * Returns equivalent Hive table schema read from a parquet file.
    *
    * @param messageType : Parquet Schema
    * @return : Hive Table schema read from parquet file MAP[String,String]
@@ -158,7 +158,7 @@ public class SchemaUtil {
   }
 
   /**
-   * Convert one field data type of parquet schema into an equivalent Hive schema
+   * Convert one field data type of parquet schema into an equivalent Hive schema.
    *
   * @param parquetType : Single parquet field
   * @return : Equivalent Hive schema
@@ -272,7 +272,7 @@ public class SchemaUtil {
   }
 
   /**
-   * Return a 'struct' Hive schema from a list of Parquet fields
+   * Return a 'struct' Hive schema from a list of Parquet fields.
    *
    * @param parquetFields : list of parquet fields
    * @return : Equivalent 'struct' Hive schema
@@ -324,14 +324,14 @@ public class SchemaUtil {
   }
 
   /**
-   * Create a 'Map' schema from Parquet map field
+   * Create a 'Map' schema from Parquet map field.
    */
   private static String createHiveMap(String keyType, String valueType) {
     return "MAP< " + keyType + ", " + valueType + ">";
   }
 
   /**
-   * Create an Array Hive schema from equivalent parquet list type
+   * Create an Array Hive schema from equivalent parquet list type.
    */
   private static String createHiveArray(Type elementType, String elementName) {
     StringBuilder array = new StringBuilder();
@@ -425,7 +425,7 @@ public class SchemaUtil {
   }
 
   /**
-   * Read the schema from the log file on path
+   * Read the schema from the log file on path.
    * 
    * @return
    */
diff --git a/hudi-hive/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java b/hudi-hive/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
index 8266531..2276c47 100644
--- a/hudi-hive/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
+++ b/hudi-hive/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
@@ -74,7 +74,7 @@ public class TestHiveSyncTool {
   }
 
   /**
-   * Testing converting array types to Hive field declaration strings, according to the Parquet-113 spec:
+   * Testing converting array types to Hive field declaration strings. According to the Parquet-113 spec:
    * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#lists
    */
   @Test
diff --git a/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java b/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java
index b83eddc..c9a711c 100644
--- a/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java
+++ b/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java
@@ -64,7 +64,7 @@ public class HiveTestService {
   private static final int CONNECTION_TIMEOUT = 30000;
 
   /**
-   * Configuration settings
+   * Configuration settings.
    */
   private Configuration hadoopConf;
   private String workDir;
diff --git a/hudi-spark/src/main/java/org/apache/hudi/BaseAvroPayload.java b/hudi-spark/src/main/java/org/apache/hudi/BaseAvroPayload.java
index 1268362..b133560 100644
--- a/hudi-spark/src/main/java/org/apache/hudi/BaseAvroPayload.java
+++ b/hudi-spark/src/main/java/org/apache/hudi/BaseAvroPayload.java
@@ -28,17 +28,17 @@ import java.io.IOException;
 import java.io.Serializable;
 
 /**
- * Base class for all AVRO record based payloads, that can be ordered based on a field
+ * Base class for all AVRO record based payloads, that can be ordered based on a field.
  */
 public abstract class BaseAvroPayload implements Serializable {
 
   /**
-   * Avro data extracted from the source converted to bytes
+   * Avro data extracted from the source converted to bytes.
    */
   protected final byte[] recordBytes;
 
   /**
-   * For purposes of preCombining
+   * For purposes of preCombining.
    */
   protected final Comparable orderingVal;
 
diff --git a/hudi-spark/src/main/java/org/apache/hudi/DataSourceUtils.java b/hudi-spark/src/main/java/org/apache/hudi/DataSourceUtils.java
index e7cfcc8..2a8551a 100644
--- a/hudi-spark/src/main/java/org/apache/hudi/DataSourceUtils.java
+++ b/hudi-spark/src/main/java/org/apache/hudi/DataSourceUtils.java
@@ -49,7 +49,7 @@ import java.util.Map;
 import java.util.stream.Collectors;
 
 /**
- * Utilities used throughout the data source
+ * Utilities used throughout the data source.
  */
 public class DataSourceUtils {
 
@@ -120,7 +120,7 @@ public class DataSourceUtils {
   }
 
   /**
-   * Create a partition value extractor class via reflection, passing in any configs needed
+   * Create a partition value extractor class via reflection, passing in any configs needed.
    */
   public static PartitionValueExtractor createPartitionExtractor(String partitionExtractorClass) {
     try {
diff --git a/hudi-spark/src/main/java/org/apache/hudi/HoodieDataSourceHelpers.java b/hudi-spark/src/main/java/org/apache/hudi/HoodieDataSourceHelpers.java
index f9df30a..362cdf0 100644
--- a/hudi-spark/src/main/java/org/apache/hudi/HoodieDataSourceHelpers.java
+++ b/hudi-spark/src/main/java/org/apache/hudi/HoodieDataSourceHelpers.java
@@ -31,7 +31,7 @@ import java.util.List;
 import java.util.stream.Collectors;
 
 /**
- * List of helpers to aid, construction of instanttime for read and write operations using datasource
+ * List of helpers to aid construction of instanttime for read and write operations using datasource.
  */
 public class HoodieDataSourceHelpers {
 
@@ -53,7 +53,7 @@ public class HoodieDataSourceHelpers {
   }
 
   /**
-   * Returns the last successful write operation's instant time
+   * Returns the last successful write operation's instant time.
    */
   public static String latestCommit(FileSystem fs, String basePath) {
     HoodieTimeline timeline = allCompletedCommitsCompactions(fs, basePath);
diff --git a/hudi-spark/src/main/java/org/apache/hudi/KeyGenerator.java b/hudi-spark/src/main/java/org/apache/hudi/KeyGenerator.java
index 4b8084b..17b5e0b 100644
--- a/hudi-spark/src/main/java/org/apache/hudi/KeyGenerator.java
+++ b/hudi-spark/src/main/java/org/apache/hudi/KeyGenerator.java
@@ -26,7 +26,7 @@ import org.apache.avro.generic.GenericRecord;
 import java.io.Serializable;
 
 /**
- * Abstract class to extend for plugging in extraction of {@link HoodieKey} from an Avro record
+ * Abstract class to extend for plugging in extraction of {@link HoodieKey} from an Avro record.
  */
 public abstract class KeyGenerator implements Serializable {
 
diff --git a/hudi-spark/src/main/java/org/apache/hudi/NonpartitionedKeyGenerator.java b/hudi-spark/src/main/java/org/apache/hudi/NonpartitionedKeyGenerator.java
index 8c0a664..35829a1 100644
--- a/hudi-spark/src/main/java/org/apache/hudi/NonpartitionedKeyGenerator.java
+++ b/hudi-spark/src/main/java/org/apache/hudi/NonpartitionedKeyGenerator.java
@@ -25,7 +25,7 @@ import org.apache.hudi.exception.HoodieKeyException;
 import org.apache.avro.generic.GenericRecord;
 
 /**
- * Simple Key generator for unpartitioned Hive Tables
+ * Simple Key generator for unpartitioned Hive Tables.
  */
 public class NonpartitionedKeyGenerator extends SimpleKeyGenerator {
 
diff --git a/hudi-spark/src/test/java/HoodieJavaApp.java b/hudi-spark/src/test/java/HoodieJavaApp.java
index 389c015..5370e75 100644
--- a/hudi-spark/src/test/java/HoodieJavaApp.java
+++ b/hudi-spark/src/test/java/HoodieJavaApp.java
@@ -45,7 +45,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 /**
- * Sample program that writes & reads hoodie datasets via the Spark datasource
+ * Sample program that writes & reads hoodie datasets via the Spark datasource.
  */
 public class HoodieJavaApp {
 
@@ -232,7 +232,7 @@ public class HoodieJavaApp {
   }
 
   /**
-   * Setup configs for syncing to hive
+   * Setup configs for syncing to hive.
    */
   private DataFrameWriter<Row> updateHiveSyncConfig(DataFrameWriter<Row> writer) {
     if (enableHiveSync) {
diff --git a/hudi-spark/src/test/java/HoodieJavaStreamingApp.java b/hudi-spark/src/test/java/HoodieJavaStreamingApp.java
index c448abb..694ae88 100644
--- a/hudi-spark/src/test/java/HoodieJavaStreamingApp.java
+++ b/hudi-spark/src/test/java/HoodieJavaStreamingApp.java
@@ -45,7 +45,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
 /**
- * Sample program that writes & reads hoodie datasets via the Spark datasource streaming
+ * Sample program that writes & reads hoodie datasets via the Spark datasource streaming.
  */
 public class HoodieJavaStreamingApp {
 
@@ -165,7 +165,7 @@ public class HoodieJavaStreamingApp {
   }
 
   /**
-   * Adding data to the streaming source and showing results over time
+   * Adding data to the streaming source and showing results over time.
    * 
    * @param spark
    * @param fs
@@ -215,7 +215,7 @@ public class HoodieJavaStreamingApp {
   }
 
   /**
-   * Hoodie spark streaming job
+   * Hoodie spark streaming job.
    * 
    * @param streamingInput
    * @throws Exception
@@ -236,7 +236,7 @@ public class HoodieJavaStreamingApp {
   }
 
   /**
-   * Setup configs for syncing to hive
+   * Setup configs for syncing to hive.
    * 
    * @param writer
    * @return
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
index fcc053f..56cfe23 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
@@ -49,7 +49,7 @@ import java.util.List;
 import java.util.stream.Collectors;
 
 /**
- * Main REST Handler class that handles local view staleness and delegates calls to slice/data-file/timeline handlers
+ * Main REST Handler class that handles local view staleness and delegates calls to slice/data-file/timeline handlers.
  */
 public class FileSystemViewHandler {
 
@@ -79,7 +79,7 @@ public class FileSystemViewHandler {
   }
 
   /**
-   * Determines if local view of dataset's timeline is behind that of client's view
+   * Determines if local view of dataset's timeline is behind that of client's view.
    */
   private boolean isLocalViewBehind(Context ctx) {
     String basePath = ctx.queryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM);
@@ -108,7 +108,7 @@ public class FileSystemViewHandler {
   }
 
   /**
-   * Syncs data-set view if local view is behind
+   * Syncs data-set view if local view is behind.
    */
   private boolean syncIfLocalViewBehind(Context ctx) {
     if (isLocalViewBehind(ctx)) {
@@ -141,7 +141,7 @@ public class FileSystemViewHandler {
   }
 
   /**
-   * Register Timeline API calls
+   * Register Timeline API calls.
    */
   private void registerTimelineAPI() {
     app.get(RemoteHoodieTableFileSystemView.LAST_INSTANT, new ViewHandler(ctx -> {
@@ -158,7 +158,7 @@ public class FileSystemViewHandler {
   }
 
   /**
-   * Register Data-Files API calls
+   * Register Data-Files API calls.
    */
   private void registerDataFilesAPI() {
     app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_DATA_FILES_URL, new ViewHandler(ctx -> {
@@ -215,7 +215,7 @@ public class FileSystemViewHandler {
   }
 
   /**
-   * Register File Slices API calls
+   * Register File Slices API calls.
    */
   private void registerFileSlicesAPI() {
     app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_SLICES_URL, new ViewHandler(ctx -> {
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
index 16ff2f7..e8ac49d 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
@@ -36,7 +36,7 @@ import java.io.IOException;
 import java.io.Serializable;
 
 /**
- * A stand alone timeline service exposing File-System View interfaces to clients
+ * A stand alone timeline service exposing File-System View interfaces to clients.
  */
 public class TimelineService {
 
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/DataFileHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/DataFileHandler.java
index 2b92bbf..d18c20b 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/DataFileHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/DataFileHandler.java
@@ -30,7 +30,7 @@ import java.util.List;
 import java.util.stream.Collectors;
 
 /**
- * REST Handler servicing data-file requests
+ * REST Handler servicing data-file requests.
  */
 public class DataFileHandler extends Handler {
 
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
index e58f835..7cca939 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
@@ -32,7 +32,7 @@ import java.util.List;
 import java.util.stream.Collectors;
 
 /**
- * REST Handler servicing file-slice requests
+ * REST Handler servicing file-slice requests.
  */
 public class FileSliceHandler extends Handler {
 
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/TimelineHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/TimelineHandler.java
index faa81a4..75f53a0 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/TimelineHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/TimelineHandler.java
@@ -30,7 +30,7 @@ import java.util.Arrays;
 import java.util.List;
 
 /**
- * REST Handler servicing timeline requests
+ * REST Handler servicing timeline requests.
  */
 public class TimelineHandler extends Handler {
 
diff --git a/hudi-timeline-service/src/test/java/org/apache/hudi/timeline/table/view/TestRemoteHoodieTableFileSystemView.java b/hudi-timeline-service/src/test/java/org/apache/hudi/timeline/table/view/TestRemoteHoodieTableFileSystemView.java
index d69a828..5f72676 100644
--- a/hudi-timeline-service/src/test/java/org/apache/hudi/timeline/table/view/TestRemoteHoodieTableFileSystemView.java
+++ b/hudi-timeline-service/src/test/java/org/apache/hudi/timeline/table/view/TestRemoteHoodieTableFileSystemView.java
@@ -32,7 +32,7 @@ import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 
 /**
- * Bring up a remote Timeline Server and run all test-cases of TestHoodieTableFileSystemView against it
+ * Bring up a remote Timeline Server and run all test-cases of TestHoodieTableFileSystemView against it.
  */
 public class TestRemoteHoodieTableFileSystemView extends TestHoodieTableFileSystemView {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
index 62d2aa7..069c8bb 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
@@ -62,7 +62,7 @@ import java.util.Properties;
 import scala.Tuple2;
 
 /**
- * Loads data from Parquet Sources
+ * Loads data from Parquet Sources.
  */
 public class HDFSParquetImporter implements Serializable {
 
@@ -190,7 +190,7 @@ public class HDFSParquetImporter implements Serializable {
   }
 
   /**
-   * Imports records to Hoodie dataset
+   * Imports records to Hoodie dataset.
    *
    * @param client Hoodie Client
    * @param instantTime Instant Time
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
index 27c3220..6afcc7a 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
@@ -41,17 +41,17 @@ public class HoodieCleaner {
   private static volatile Logger log = LogManager.getLogger(HoodieCleaner.class);
 
   /**
-   * Config for Cleaner
+   * Config for Cleaner.
    */
   private final Config cfg;
 
   /**
-   * Filesystem used
+   * Filesystem used.
    */
   private transient FileSystem fs;
 
   /**
-   * Spark context
+   * Spark context.
    */
   private transient JavaSparkContext jssc;
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactionAdminTool.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactionAdminTool.java
index d42a45d..45b904d 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactionAdminTool.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactionAdminTool.java
@@ -56,7 +56,7 @@ public class HoodieCompactionAdminTool {
   }
 
   /**
-   * Executes one of compaction admin operations
+   * Executes one of compaction admin operations.
    */
   public void run(JavaSparkContext jsc) throws Exception {
     HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), cfg.basePath);
@@ -118,7 +118,7 @@ public class HoodieCompactionAdminTool {
   }
 
   /**
-   * Print Operation Result
+   * Print Operation Result.
    *
    * @param initialLine Initial Line
    * @param result Result
@@ -131,14 +131,14 @@ public class HoodieCompactionAdminTool {
   }
 
   /**
-   * Operation Types
+   * Operation Types.
    */
   public enum Operation {
     VALIDATE, UNSCHEDULE_PLAN, UNSCHEDULE_FILE, REPAIR
   }
 
   /**
-   * Admin Configuration Options
+   * Admin Configuration Options.
    */
   public static class Config implements Serializable {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java
index 1f33ca6..663ce0f 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java
@@ -55,7 +55,7 @@ import java.util.List;
 import java.util.Map;
 
 /**
- * Bunch of helper methods
+ * Bunch of helper methods.
  */
 public class UtilHelpers {
   private static Logger logger = LogManager.getLogger(UtilHelpers.class);
@@ -115,7 +115,7 @@ public class UtilHelpers {
   }
 
   /**
-   * Parse Schema from file
+   * Parse Schema from file.
    *
    * @param fs File System
    * @param schemaFile Schema File
@@ -167,7 +167,7 @@ public class UtilHelpers {
   }
 
   /**
-   * Build Spark Context for ingestion/compaction
+   * Build Spark Context for ingestion/compaction.
    * 
    * @return
    */
@@ -178,7 +178,7 @@ public class UtilHelpers {
   }
 
   /**
-   * Build Hoodie write client
+   * Build Hoodie write client.
    *
    * @param jsc Java Spark Context
    * @param basePath Base Path
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/adhoc/UpgradePayloadFromUberToApache.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/adhoc/UpgradePayloadFromUberToApache.java
index 6793d94..bfbcf71 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/adhoc/UpgradePayloadFromUberToApache.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/adhoc/UpgradePayloadFromUberToApache.java
@@ -41,7 +41,7 @@ import java.util.Properties;
 
 /**
  * This is an one-time use class meant for migrating the configuration for "hoodie.compaction.payload.class" in
- * .hoodie/hoodie.properties from com.uber.hoodie to org.apache.hudi It takes in a file containing base-paths for a set
+ * .hoodie/hoodie.properties from com.uber.hoodie to org.apache.hudi. It takes in a file containing base-paths for a set
  * of hudi datasets and does the migration
  */
 public class UpgradePayloadFromUberToApache implements Serializable {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/AbstractDeltaStreamerService.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/AbstractDeltaStreamerService.java
index bd4d8a2..b6f5306 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/AbstractDeltaStreamerService.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/AbstractDeltaStreamerService.java
@@ -32,7 +32,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 
 /**
- * Base Class for running delta-sync/compaction in separate thread and controlling their life-cyle
+ * Base Class for running delta-sync/compaction in separate thread and controlling their life-cycle.
  */
 public abstract class AbstractDeltaStreamerService implements Serializable {
 
@@ -116,14 +116,14 @@ public abstract class AbstractDeltaStreamerService implements Serializable {
   }
 
   /**
-   * Service implementation
+   * Service implementation.
    * 
    * @return
    */
   protected abstract Pair<CompletableFuture, ExecutorService> startService();
 
   /**
-   * A monitor thread is started which would trigger a callback if the service is shutdown
+   * A monitor thread is started which would trigger a callback if the service is shutdown.
    * 
    * @param onShutdownCallback
    */
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/Compactor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/Compactor.java
index 3285ba7..140081a 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/Compactor.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/Compactor.java
@@ -33,7 +33,7 @@ import java.io.IOException;
 import java.io.Serializable;
 
 /**
- * Run one round of compaction
+ * Run one round of compaction.
  */
 public class Compactor implements Serializable {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java
index 532ec69..91a9bc6 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java
@@ -78,9 +78,8 @@ import scala.collection.JavaConversions;
 import static org.apache.hudi.utilities.schema.RowBasedSchemaProvider.HOODIE_RECORD_NAMESPACE;
 import static org.apache.hudi.utilities.schema.RowBasedSchemaProvider.HOODIE_RECORD_STRUCT_NAME;
 
-
 /**
- * Sync's one batch of data to hoodie dataset
+ * Syncs one batch of data to hoodie dataset.
  */
 public class DeltaSync implements Serializable {
 
@@ -89,12 +88,12 @@ public class DeltaSync implements Serializable {
   public static String CHECKPOINT_RESET_KEY = "deltastreamer.checkpoint.reset_key";
 
   /**
-   * Delta Sync Config
+   * Delta Sync Config.
    */
   private final HoodieDeltaStreamer.Config cfg;
 
   /**
-   * Source to pull deltas from
+   * Source to pull deltas from.
    */
   private transient SourceFormatAdapter formatAdapter;
 
@@ -104,32 +103,32 @@ public class DeltaSync implements Serializable {
   private transient SchemaProvider schemaProvider;
 
   /**
-   * Allows transforming source to target dataset before writing
+   * Allows transforming source to target dataset before writing.
    */
   private transient Transformer transformer;
 
   /**
-   * Extract the key for the target dataset
+   * Extract the key for the target dataset.
    */
   private KeyGenerator keyGenerator;
 
   /**
-   * Filesystem used
+   * Filesystem used.
    */
   private transient FileSystem fs;
 
   /**
-   * Spark context
+   * Spark context.
    */
   private transient JavaSparkContext jssc;
 
   /**
-   * Spark Session
+   * Spark Session.
    */
   private transient SparkSession sparkSession;
 
   /**
-   * Hive Config
+   * Hive Config.
    */
   private transient HiveConf hiveConf;
 
@@ -139,22 +138,22 @@ public class DeltaSync implements Serializable {
   private final TypedProperties props;
 
   /**
-   * Callback when write client is instantiated
+   * Callback when write client is instantiated.
    */
   private transient Function<HoodieWriteClient, Boolean> onInitializingHoodieWriteClient;
 
   /**
-   * Timeline with completed commits
+   * Timeline with completed commits.
    */
   private transient Option<HoodieTimeline> commitTimelineOpt;
 
   /**
-   * Write Client
+   * Write Client.
    */
   private transient HoodieWriteClient writeClient;
 
   /**
-   * Table Type
+   * Table Type.
    */
   private final HoodieTableType tableType;
 
@@ -190,7 +189,7 @@ public class DeltaSync implements Serializable {
   }
 
   /**
-   * Refresh Timeline
+   * Refresh Timeline.
    */
   private void refreshTimeline() throws IOException {
     if (fs.exists(new Path(cfg.targetBasePath))) {
@@ -204,7 +203,7 @@ public class DeltaSync implements Serializable {
   }
 
   /**
-   * Run one round of delta sync and return new compaction instant if one got scheduled
+   * Run one round of delta sync and return new compaction instant if one got scheduled.
    */
   public Option<String> syncOnce() throws Exception {
     Option<String> scheduledCompaction = Option.empty();
@@ -236,7 +235,7 @@ public class DeltaSync implements Serializable {
   }
 
   /**
-   * Read from Upstream Source and apply transformation if needed
+   * Read from Upstream Source and apply transformation if needed.
    */
   private Pair<SchemaProvider, Pair<String, JavaRDD<HoodieRecord>>> readFromSource(
       Option<HoodieTimeline> commitTimelineOpt) throws Exception {
@@ -321,7 +320,7 @@ public class DeltaSync implements Serializable {
   }
 
   /**
-   * Perform Hoodie Write. Run Cleaner, schedule compaction and syncs to hive if needed
+   * Perform Hoodie Write. Run Cleaner, schedule compaction and sync to hive if needed.
    *
    * @param records Input Records
    * @param checkpointStr Checkpoint String
@@ -434,7 +433,7 @@ public class DeltaSync implements Serializable {
   }
 
   /**
-   * Sync to Hive
+   * Sync to Hive.
    */
   private void syncHive() throws ClassNotFoundException {
     if (cfg.enableHiveSync) {
@@ -462,7 +461,7 @@ public class DeltaSync implements Serializable {
   }
 
   /**
-   * Helper to construct Write Client config
+   * Helper to construct Write Client config.
    *
    * @param schemaProvider Schema Provider
    */
@@ -491,7 +490,7 @@ public class DeltaSync implements Serializable {
   }
 
   /**
-   * Register Avro Schemas
+   * Register Avro Schemas.
    *
    * @param schemaProvider Schema Provider
    */
@@ -510,7 +509,7 @@ public class DeltaSync implements Serializable {
   }
 
   /**
-   * Close all resources
+   * Close all resources.
    */
   public void close() {
     if (null != writeClient) {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java
index 9893f0d..baef2ea 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java
@@ -107,7 +107,7 @@ public class HoodieDeltaStreamer implements Serializable {
   }
 
   /**
-   * Main method to start syncing
+   * Main method to start syncing.
    * 
    * @throws Exception
    */
@@ -306,7 +306,7 @@ public class HoodieDeltaStreamer implements Serializable {
   public static class DeltaSyncService extends AbstractDeltaStreamerService {
 
     /**
-     * Delta Sync Config
+     * Delta Sync Config.
      */
     private final HoodieDeltaStreamer.Config cfg;
 
@@ -316,12 +316,12 @@ public class HoodieDeltaStreamer implements Serializable {
     private transient SchemaProvider schemaProvider;
 
     /**
-     * Spark Session
+     * Spark Session.
      */
     private transient SparkSession sparkSession;
 
     /**
-     * Spark context
+     * Spark context.
      */
     private transient JavaSparkContext jssc;
 
@@ -331,17 +331,17 @@ public class HoodieDeltaStreamer implements Serializable {
     TypedProperties props;
 
     /**
-     * Async Compactor Service
+     * Async Compactor Service.
      */
     private AsyncCompactService asyncCompactService;
 
     /**
-     * Table Type
+     * Table Type.
      */
     private final HoodieTableType tableType;
 
     /**
-     * Delta Sync
+     * Delta Sync.
      */
     private transient DeltaSync deltaSync;
 
@@ -419,7 +419,7 @@ public class HoodieDeltaStreamer implements Serializable {
     }
 
     /**
-     * Shutdown compactor as DeltaSync is shutdown
+     * Shutdown compactor as DeltaSync is shutdown.
      */
     private void shutdownCompactor(boolean error) {
       log.info("Delta Sync shutdown. Error ?" + error);
@@ -430,7 +430,7 @@ public class HoodieDeltaStreamer implements Serializable {
     }
 
     /**
-     * Callback to initialize write client and start compaction service if required
+     * Callback to initialize write client and start compaction service if required.
      * 
      * @param writeClient HoodieWriteClient
      * @return
@@ -458,7 +458,7 @@ public class HoodieDeltaStreamer implements Serializable {
     }
 
     /**
-     * Close all resources
+     * Close all resources.
      */
     public void close() {
       if (null != deltaSync) {
@@ -507,14 +507,14 @@ public class HoodieDeltaStreamer implements Serializable {
     }
 
     /**
-     * Enqueues new Pending compaction
+     * Enqueues new Pending compaction.
      */
     public void enqueuePendingCompaction(HoodieInstant instant) {
       pendingCompactions.add(instant);
     }
 
     /**
-     * Wait till outstanding pending compactions reduces to the passed in value
+     * Wait till outstanding pending compactions reduces to the passed in value.
      * 
      * @param numPendingCompactions Maximum pending compactions allowed
      * @throws InterruptedException
@@ -531,7 +531,7 @@ public class HoodieDeltaStreamer implements Serializable {
     }
 
     /**
-     * Fetch Next pending compaction if available
+     * Fetch Next pending compaction if available.
      * 
      * @return
      * @throws InterruptedException
@@ -552,7 +552,7 @@ public class HoodieDeltaStreamer implements Serializable {
     }
 
     /**
-     * Start Compaction Service
+     * Start Compaction Service.
      */
     protected Pair<CompletableFuture, ExecutorService> startService() {
       ExecutorService executor = Executors.newFixedThreadPool(maxConcurrentCompaction);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SchedulerConfGenerator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SchedulerConfGenerator.java
index 66d2c47..1e754ba 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SchedulerConfGenerator.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SchedulerConfGenerator.java
@@ -60,7 +60,7 @@ public class SchedulerConfGenerator {
   }
 
   /**
-   * Helper to set Spark Scheduling Configs dynamically
+   * Helper to set Spark Scheduling Configs dynamically.
    *
    * @param cfg Config
    */
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SourceFormatAdapter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SourceFormatAdapter.java
index b41efcd..65779e0 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SourceFormatAdapter.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SourceFormatAdapter.java
@@ -39,7 +39,7 @@ import static org.apache.hudi.utilities.schema.RowBasedSchemaProvider.HOODIE_REC
 import static org.apache.hudi.utilities.schema.RowBasedSchemaProvider.HOODIE_RECORD_STRUCT_NAME;
 
 /**
- * Adapts data-format provided by the source to the data-format required by the client (DeltaStreamer)
+ * Adapts data-format provided by the source to the data-format required by the client (DeltaStreamer).
  */
 public final class SourceFormatAdapter {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/keygen/TimestampBasedKeyGenerator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/keygen/TimestampBasedKeyGenerator.java
index 04c6ece..140c709 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/keygen/TimestampBasedKeyGenerator.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/keygen/TimestampBasedKeyGenerator.java
@@ -52,7 +52,7 @@ public class TimestampBasedKeyGenerator extends SimpleKeyGenerator {
   private final String outputDateFormat;
 
   /**
-   * Supported configs
+   * Supported configs.
    */
   static class Config {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/FilebasedSchemaProvider.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/FilebasedSchemaProvider.java
index 5776984..219948a 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/FilebasedSchemaProvider.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/FilebasedSchemaProvider.java
@@ -32,12 +32,12 @@ import java.io.IOException;
 import java.util.Collections;
 
 /**
- * A simple schema provider, that reads off files on DFS
+ * A simple schema provider that reads off files on DFS.
  */
 public class FilebasedSchemaProvider extends SchemaProvider {
 
   /**
-   * Configs supported
+   * Configs supported.
    */
   public static class Config {
     private static final String SOURCE_SCHEMA_FILE_PROP = "hoodie.deltastreamer.schemaprovider" + ".source.schema.file";
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/SchemaProvider.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/SchemaProvider.java
index 8378383..1a93751 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/SchemaProvider.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/SchemaProvider.java
@@ -26,7 +26,7 @@ import org.apache.spark.api.java.JavaSparkContext;
 import java.io.Serializable;
 
 /**
- * Class to provide schema for reading data and also writing into a Hoodie table
+ * Class to provide schema for reading data and also writing into a Hoodie table.
  */
 public abstract class SchemaProvider implements Serializable {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/SchemaRegistryProvider.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/SchemaRegistryProvider.java
index d03c6da..263ddce 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/SchemaRegistryProvider.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/SchemaRegistryProvider.java
@@ -32,14 +32,14 @@ import java.net.URL;
 import java.util.Collections;
 
 /**
- * Obtains latest schema from the Confluent/Kafka schema-registry
+ * Obtains latest schema from the Confluent/Kafka schema-registry.
  *
  * https://github.com/confluentinc/schema-registry
  */
 public class SchemaRegistryProvider extends SchemaProvider {
 
   /**
-   * Configs supported
+   * Configs supported.
    */
   public static class Config {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroDFSSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroDFSSource.java
index 4cd8f07..e791970 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroDFSSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroDFSSource.java
@@ -34,7 +34,7 @@ import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.sql.SparkSession;
 
 /**
- * DFS Source that reads avro data
+ * DFS Source that reads avro data.
  */
 public class AvroDFSSource extends AvroSource {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroKafkaSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroKafkaSource.java
index 9588a81..da7d016 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroKafkaSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroKafkaSource.java
@@ -36,7 +36,7 @@ import org.apache.spark.streaming.kafka.KafkaUtils;
 import org.apache.spark.streaming.kafka.OffsetRange;
 
 /**
- * Reads avro serialized Kafka data, based on the confluent schema-registry
+ * Reads avro serialized Kafka data, based on the confluent schema-registry.
  */
 public class AvroKafkaSource extends AvroSource {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HiveIncrPullSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HiveIncrPullSource.java
index 4e4d603..2b54b45 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HiveIncrPullSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HiveIncrPullSource.java
@@ -66,7 +66,7 @@ public class HiveIncrPullSource extends AvroSource {
   private final String incrPullRootPath;
 
   /**
-   * Configs supported
+   * Configs supported.
    */
   static class Config {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HoodieIncrSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HoodieIncrSource.java
index 3edb296..67de9c2 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HoodieIncrSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HoodieIncrSource.java
@@ -40,25 +40,25 @@ public class HoodieIncrSource extends RowSource {
   protected static class Config {
 
     /**
-     * {@value #HOODIE_SRC_BASE_PATH} is the base-path for the source Hoodie table
+     * {@value #HOODIE_SRC_BASE_PATH} is the base-path for the source Hoodie table.
      */
     private static final String HOODIE_SRC_BASE_PATH = "hoodie.deltastreamer.source.hoodieincr.path";
 
     /**
-     * {@value #NUM_INSTANTS_PER_FETCH} allows the max number of instants whose changes can be incrementally fetched
+     * {@value #NUM_INSTANTS_PER_FETCH} allows the max number of instants whose changes can be incrementally fetched.
      */
     private static final String NUM_INSTANTS_PER_FETCH = "hoodie.deltastreamer.source.hoodieincr.num_instants";
     private static final Integer DEFAULT_NUM_INSTANTS_PER_FETCH = 1;
 
     /**
      * {@value #HOODIE_SRC_PARTITION_FIELDS} specifies partition fields that needs to be added to source table after
-     * parsing _hoodie_partition_path
+     * parsing _hoodie_partition_path.
      */
     private static final String HOODIE_SRC_PARTITION_FIELDS = "hoodie.deltastreamer.source.hoodieincr.partition.fields";
 
     /**
      * {@value #HOODIE_SRC_PARTITION_EXTRACTORCLASS} PartitionValueExtractor class to extract partition fields from
-     * _hoodie_partition_path
+     * _hoodie_partition_path.
      */
     private static final String HOODIE_SRC_PARTITION_EXTRACTORCLASS =
         "hoodie.deltastreamer.source.hoodieincr.partition.extractor.class";
@@ -90,7 +90,7 @@ public class HoodieIncrSource extends RowSource {
      * props.getStringList(Config.HOODIE_SRC_PARTITION_FIELDS, ",", new ArrayList<>()); PartitionValueExtractor
      * extractor = DataSourceUtils.createPartitionExtractor(props.getString( Config.HOODIE_SRC_PARTITION_EXTRACTORCLASS,
      * Config.DEFAULT_HOODIE_SRC_PARTITION_EXTRACTORCLASS));
-     **/
+     */
     String srcPath = props.getString(Config.HOODIE_SRC_BASE_PATH);
     int numInstantsPerFetch = props.getInteger(Config.NUM_INSTANTS_PER_FETCH, Config.DEFAULT_NUM_INSTANTS_PER_FETCH);
     boolean readLatestOnMissingCkpt = props.getBoolean(Config.READ_LATEST_INSTANT_ON_MISSING_CKPT,
@@ -136,7 +136,7 @@ public class HoodieIncrSource extends RowSource {
      * RowFactory.create(rowObjs.toArray()); } return row; }, RowEncoder.apply(newSchema));
      * 
      * log.info("Validated Source Schema :" + validated.schema());
-     **/
+     */
 
     // Remove Hoodie meta columns except partition path from input source
     final Dataset<Row> src = source.drop(HoodieRecord.HOODIE_META_COLUMNS.stream()
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonDFSSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonDFSSource.java
index ed9b82a..37f67e4 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonDFSSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonDFSSource.java
@@ -29,7 +29,7 @@ import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.sql.SparkSession;
 
 /**
- * DFS Source that reads json data
+ * DFS Source that reads json data.
  */
 public class JsonDFSSource extends JsonSource {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonKafkaSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonKafkaSource.java
index ba68ac9..0da89f9 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonKafkaSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonKafkaSource.java
@@ -34,7 +34,7 @@ import org.apache.spark.streaming.kafka.KafkaUtils;
 import org.apache.spark.streaming.kafka.OffsetRange;
 
 /**
- * Read json kafka data
+ * Read json kafka data.
  */
 public class JsonKafkaSource extends JsonSource {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/ParquetDFSSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/ParquetDFSSource.java
index 3695fce..9f4eab1 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/ParquetDFSSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/ParquetDFSSource.java
@@ -32,7 +32,7 @@ import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.sql.SparkSession;
 
 /**
- * DFS Source that reads parquet data
+ * DFS Source that reads parquet data.
  */
 public class ParquetDFSSource extends ParquetSource {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
index 48e3bd7..c724c99 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
@@ -63,7 +63,7 @@ public abstract class Source<T> implements Serializable {
   protected abstract InputBatch<T> fetchNewData(Option<String> lastCkptStr, long sourceLimit);
 
   /**
-   * Main API called by Hoodie Delta Streamer to fetch records
+   * Main API called by Hoodie Delta Streamer to fetch records.
    * 
    * @param lastCkptStr Last Checkpoint
    * @param sourceLimit Source Limit
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/AvroConvertor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/AvroConvertor.java
index 97bb937..9dd2c6a 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/AvroConvertor.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/AvroConvertor.java
@@ -35,20 +35,20 @@ import java.io.Serializable;
 public class AvroConvertor implements Serializable {
 
   /**
-   * To be lazily inited on executors
+   * To be lazily inited on executors.
    */
   private transient Schema schema;
 
   private final String schemaStr;
 
   /**
-   * To be lazily inited on executors
+   * To be lazily inited on executors.
    */
   private transient MercifulJsonConverter jsonConverter;
 
 
   /**
-   * To be lazily inited on executors
+   * To be lazily inited on executors.
    */
   private transient Injection<GenericRecord, byte[]> recordInjection;
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DFSPathSelector.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DFSPathSelector.java
index 2bc0466..ba4a405 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DFSPathSelector.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DFSPathSelector.java
@@ -43,7 +43,7 @@ import java.util.stream.Collectors;
 public class DFSPathSelector {
 
   /**
-   * Configs supported
+   * Configs supported.
    */
   static class Config {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/IncrSourceHelper.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/IncrSourceHelper.java
index c6430ea..19a85eb 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/IncrSourceHelper.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/IncrSourceHelper.java
@@ -31,7 +31,7 @@ import org.apache.spark.sql.Row;
 public class IncrSourceHelper {
 
   /**
-   * Get a timestamp which is the next value in a descending sequence
+   * Get a timestamp which is the next value in a descending sequence.
    *
    * @param timestamp Timestamp
    */
@@ -43,7 +43,7 @@ public class IncrSourceHelper {
   }
 
   /**
-   * Find begin and end instants to be set for the next fetch
+   * Find begin and end instants to be set for the next fetch.
    *
    * @param jssc Java Spark Context
    * @param srcBasePath Base path of Hudi source table
@@ -77,7 +77,7 @@ public class IncrSourceHelper {
   }
 
   /**
-   * Validate instant time seen in the incoming row
+   * Validate instant time seen in the incoming row.
    *
    * @param row Input Row
    * @param instantTime Hoodie Instant time of the row
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
index 278a352..a4e82dd 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
@@ -47,7 +47,7 @@ import scala.collection.mutable.StringBuilder;
 import scala.util.Either;
 
 /**
- * Source to read data from Kafka, incrementally
+ * Source to read data from Kafka, incrementally.
  */
 public class KafkaOffsetGen {
 
@@ -162,7 +162,7 @@ public class KafkaOffsetGen {
   }
 
   /**
-   * Kafka reset offset strategies
+   * Kafka reset offset strategies.
    */
   enum KafkaResetOffsetStrategies {
     LARGEST, SMALLEST
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/FlatteningTransformer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/FlatteningTransformer.java
index 7c41e8d..8bfa7bf 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/FlatteningTransformer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/FlatteningTransformer.java
@@ -39,7 +39,9 @@ public class FlatteningTransformer implements Transformer {
   private static final String TMP_TABLE = "HUDI_SRC_TMP_TABLE_";
   private static volatile Logger log = LogManager.getLogger(SqlQueryBasedTransformer.class);
 
-  /** Configs supported */
+  /**
+   * Configs supported.
+   */
   @Override
   public Dataset<Row> apply(JavaSparkContext jsc, SparkSession sparkSession, Dataset<Row> rowDataset,
       TypedProperties properties) {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/IdentityTransformer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/IdentityTransformer.java
index f74291f..31f0ce6 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/IdentityTransformer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/IdentityTransformer.java
@@ -26,7 +26,7 @@ import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SparkSession;
 
 /**
- * Identity transformer
+ * Identity transformer.
  */
 public class IdentityTransformer implements Transformer {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/SqlQueryBasedTransformer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/SqlQueryBasedTransformer.java
index ff563a1..d7ec911 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/SqlQueryBasedTransformer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/SqlQueryBasedTransformer.java
@@ -42,7 +42,7 @@ public class SqlQueryBasedTransformer implements Transformer {
   private static final String TMP_TABLE = "HOODIE_SRC_TMP_TABLE_";
 
   /**
-   * Configs supported
+   * Configs supported.
    */
   static class Config {
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/Transformer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/Transformer.java
index f97b302..46857e6 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/Transformer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/Transformer.java
@@ -26,12 +26,12 @@ import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SparkSession;
 
 /**
- * Transform source to target dataset before writing
+ * Transform source to target dataset before writing.
  */
 public interface Transformer {
 
   /**
-   * Transform source RDD to target RDD
+   * Transform source RDD to target RDD.
    *
    * @param jsc JavaSparkContext
    * @param sparkSession Spark Session
diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieDeltaStreamer.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieDeltaStreamer.java
index d2be913..f4e39ae 100644
--- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieDeltaStreamer.java
+++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieDeltaStreamer.java
@@ -560,12 +560,12 @@ public class TestHoodieDeltaStreamer extends UtilitiesTestBase {
   }
 
   /**
-   * UDF to calculate Haversine distance
+   * UDF to calculate Haversine distance.
    */
   public static class DistanceUDF implements UDF4<Double, Double, Double, Double, Double> {
 
     /**
-     * Returns some random number as distance between the points
+     * Returns some random number as distance between the points.
      * 
      * @param lat1 Latitiude of source
      * @param lat2 Latitude of destination
@@ -580,7 +580,7 @@ public class TestHoodieDeltaStreamer extends UtilitiesTestBase {
   }
 
   /**
-   * Adds a new field "haversine_distance" to the row
+   * Adds a new field "haversine_distance" to the row.
    */
   public static class TripsWithDistanceTransformer implements Transformer {
 
@@ -601,7 +601,7 @@ public class TestHoodieDeltaStreamer extends UtilitiesTestBase {
   }
 
   /**
-   * Return empty dataset
+   * Return empty dataset.
    */
   public static class DropAllTransformer implements Transformer {
 
diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/UtilitiesTestBase.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/UtilitiesTestBase.java
index cfe6798..753f947 100644
--- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/UtilitiesTestBase.java
+++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/UtilitiesTestBase.java
@@ -119,7 +119,7 @@ public class UtilitiesTestBase {
   }
 
   /**
-   * Helper to get hive sync config
+   * Helper to get hive sync config.
    * 
    * @param basePath
    * @param tableName
@@ -140,7 +140,7 @@ public class UtilitiesTestBase {
   }
 
   /**
-   * Initialize Hive DB
+   * Initialize Hive DB.
    * 
    * @throws IOException
    */
diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestDFSSource.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestDFSSource.java
index f8b4869..369e385 100644
--- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestDFSSource.java
+++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestDFSSource.java
@@ -48,7 +48,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 /**
- * Basic tests against all subclasses of {@link JsonDFSSource} and {@link ParquetDFSSource}
+ * Basic tests against all subclasses of {@link JsonDFSSource} and {@link ParquetDFSSource}.
  */
 public class TestDFSSource extends UtilitiesTestBase {
 
diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestKafkaSource.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestKafkaSource.java
index c606621..d5eb6c3 100644
--- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestKafkaSource.java
+++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestKafkaSource.java
@@ -48,7 +48,7 @@ import java.util.HashMap;
 import static org.junit.Assert.assertEquals;
 
 /**
- * Tests against {@link AvroKafkaSource}
+ * Tests against {@link AvroKafkaSource}.
  */
 public class TestKafkaSource extends UtilitiesTestBase {
 
diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/config/TestSourceConfig.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/config/TestSourceConfig.java
index 217c615..f9e1598 100644
--- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/config/TestSourceConfig.java
+++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/config/TestSourceConfig.java
@@ -19,7 +19,7 @@
 package org.apache.hudi.utilities.sources.config;
 
 /**
- * Configurations for Test Data Sources
+ * Configurations for Test Data Sources.
  */
 public class TestSourceConfig {
 
diff --git a/style/checkstyle.xml b/style/checkstyle.xml
index c201cfa..10ef5fc 100644
--- a/style/checkstyle.xml
+++ b/style/checkstyle.xml
@@ -271,9 +271,8 @@
         <module name="EmptyStatement" />
 
         <!-- Checks for Java Docs. -->
-        <module name="JavadocStyle">
-            <property name="severity" value="info"/>
-        </module>
+        <module name="JavadocStyle"/>
+
         <module name="JavadocType">
             <property name="severity" value="info"/>
             <property name="scope" value="protected"/>
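    (Aside, not part of the patch: removing the severity="info" override means the JavadocStyle module now
    reports at the configuration's default severity, typically error, so a Javadoc summary sentence that does
    not end with a period fails the Checkstyle run instead of being logged as info. A minimal illustration of
    the convention the rest of this patch applies; the class names are hypothetical and only serve the example.)

    /**
     * Reads records from an example source
     */
    class BadExampleSource { }  // JavadocStyle violation: summary sentence lacks a terminating period.

    /**
     * Reads records from an example source.
     */
    class GoodExampleSource { } // Passes: summary sentence ends with a period.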