Posted to commits@hudi.apache.org by yi...@apache.org on 2023/04/15 22:29:07 UTC

[hudi] branch master updated: [MINOR] Fix typos in hudi-common module (#8464)

This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 6547b916d45 [MINOR] Fix typos in hudi-common module (#8464)
6547b916d45 is described below

commit 6547b916d45aff4f6bdd44d2a2b74d8aa8988fff
Author: Y Ethan Guo <et...@gmail.com>
AuthorDate: Sat Apr 15 15:28:53 2023 -0700

    [MINOR] Fix typos in hudi-common module (#8464)
---
 .../apache/hudi/avro/AvroSchemaCompatibility.java  | 17 ++++++-------
 .../java/org/apache/hudi/avro/HoodieAvroUtils.java |  6 ++---
 .../common/bloom/InternalDynamicBloomFilter.java   |  2 +-
 .../apache/hudi/common/bloom/InternalFilter.java   | 10 ++++----
 .../hudi/common/config/HoodieMetadataConfig.java   |  2 +-
 .../hudi/common/fs/FileSystemRetryConfig.java      |  2 +-
 .../hudi/common/model/HoodiePartitionMetadata.java |  2 +-
 .../hudi/common/model/RewriteAvroPayload.java      |  2 +-
 .../model/debezium/MySqlDebeziumAvroPayload.java   |  2 +-
 .../debezium/PostgresDebeziumAvroPayload.java      |  2 +-
 .../table/log/AbstractHoodieLogRecordReader.java   |  4 ++--
 .../hudi/common/table/timeline/HoodieTimeline.java |  4 ++--
 .../timeline/versioning/MetadataMigrator.java      |  4 ++--
 .../table/view/FileSystemViewStorageConfig.java    |  2 +-
 .../IncrementalTimelineSyncFileSystemView.java     |  2 +-
 .../common/table/view/SyncableFileSystemView.java  |  2 +-
 .../org/apache/hudi/common/util/RateLimiter.java   |  4 ++--
 .../apache/hudi/common/util/ReflectionUtils.java   |  2 +-
 .../common/util/collection/BitCaskDiskMap.java     |  8 +++----
 .../hudi/exception/HoodieHeartbeatException.java   |  2 +-
 .../internal/schema/InternalSchemaBuilder.java     |  2 +-
 .../hudi/internal/schema/action/TableChange.java   |  6 ++---
 .../hudi/internal/schema/action/TableChanges.java  |  8 ++++---
 .../io/AbstractInternalSchemaStorageManager.java   |  2 +-
 .../hudi/keygen/constant/KeyGeneratorOptions.java  |  2 +-
 .../hudi/keygen/constant/KeyGeneratorType.java     |  2 +-
 .../hudi/metadata/MetadataPartitionType.java       |  2 +-
 .../hudi/secondary/index/SecondaryIndexUtils.java  |  2 +-
 .../org/apache/hudi/common/fs/TestFSUtils.java     | 18 +++++++-------
 .../common/fs/inline/TestInLineFileSystem.java     |  2 +-
 .../common/functional/TestHoodieLogFormat.java     | 28 +++++++++++-----------
 .../table/view/TestHoodieTableFileSystemView.java  |  8 +++----
 .../table/view/TestIncrementalFSViewSync.java      | 24 +++++++++----------
 .../hudi/common/testutils/FileSystemTestUtils.java |  2 +-
 .../common/testutils/HoodieCommonTestHarness.java  |  2 +-
 .../apache/hudi/common/util/TestBinaryUtil.java    | 26 ++++++++++----------
 .../hudi/common/util/TestClusteringUtils.java      |  2 +-
 37 files changed, 111 insertions(+), 108 deletions(-)

diff --git a/hudi-common/src/main/java/org/apache/hudi/avro/AvroSchemaCompatibility.java b/hudi-common/src/main/java/org/apache/hudi/avro/AvroSchemaCompatibility.java
index caa789a3132..7a67166e205 100644
--- a/hudi-common/src/main/java/org/apache/hudi/avro/AvroSchemaCompatibility.java
+++ b/hudi-common/src/main/java/org/apache/hudi/avro/AvroSchemaCompatibility.java
@@ -18,12 +18,13 @@
 
 package org.apache.hudi.avro;
 
+import org.apache.hudi.common.util.Either;
+import org.apache.hudi.common.util.Option;
+
 import org.apache.avro.AvroRuntimeException;
 import org.apache.avro.Schema;
 import org.apache.avro.Schema.Field;
 import org.apache.avro.Schema.Type;
-import org.apache.hudi.common.util.Either;
-import org.apache.hudi.common.util.Option;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -223,7 +224,7 @@ public class AvroSchemaCompatibility {
      * Reports the compatibility of a reader/writer schema pair.
      *
      * <p>
-     * Memoizes the compatibility results.
+     * Memorizes the compatibility results.
      * </p>
      *
      * @param reader Reader schema to test.
@@ -240,13 +241,13 @@ public class AvroSchemaCompatibility {
     /**
      * Reports the compatibility of a reader/writer schema pair.
      * <p>
-     * Memoizes the compatibility results.
+     * Memorizes the compatibility results.
      * </p>
      *
-     * @param reader      Reader schema to test.
-     * @param writer      Writer schema to test.
-     * @param locations   Stack tracking the path (chain of locations) within the
-     *                    schema.
+     * @param reader    Reader schema to test.
+     * @param writer    Writer schema to test.
+     * @param locations Stack tracking the path (chain of locations) within the
+     *                  schema.
      * @return the compatibility of the reader/writer schema pair.
      */
     private SchemaCompatibilityResult getCompatibility(final Schema reader,
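
The javadoc above says the checker memoizes the compatibility result per reader/writer schema pair. A minimal sketch of that caching pattern, with illustrative names rather than the class's actual fields:

    import java.util.AbstractMap.SimpleImmutableEntry;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.BiFunction;

    // Sketch: cache an expensive pairwise check so the recursive schema walk
    // runs at most once per (reader, writer) pair.
    final class MemoizedCheck<S, R> {
      private final Map<Map.Entry<S, S>, R> cache = new ConcurrentHashMap<>();
      private final BiFunction<S, S, R> check;

      MemoizedCheck(BiFunction<S, S, R> check) {
        this.check = check;
      }

      R get(S reader, S writer) {
        return cache.computeIfAbsent(
            new SimpleImmutableEntry<>(reader, writer),
            pair -> check.apply(pair.getKey(), pair.getValue()));
      }
    }
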
diff --git a/hudi-common/src/main/java/org/apache/hudi/avro/HoodieAvroUtils.java b/hudi-common/src/main/java/org/apache/hudi/avro/HoodieAvroUtils.java
index 8955b477254..504666b15ce 100644
--- a/hudi-common/src/main/java/org/apache/hudi/avro/HoodieAvroUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/avro/HoodieAvroUtils.java
@@ -79,8 +79,8 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-import java.util.Set;
 import java.util.Properties;
+import java.util.Set;
 import java.util.TimeZone;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
@@ -1116,9 +1116,9 @@ public class HoodieAvroUtils {
    * Given avro records, rewrites them with new schema.
    *
    * @param oldRecords oldRecords to be rewrite
-   * @param newSchema newSchema used to rewrite oldRecord
+   * @param newSchema  newSchema used to rewrite oldRecord
    * @param renameCols a map store all rename cols, (k, v)-> (colNameFromNewSchema, colNameFromOldSchema)
-   * @return a iterator of rewrote GeneriRcords
+   * @return an iterator of rewrote {@link GenericRecord}
    */
   public static Iterator<GenericRecord> rewriteRecordWithNewSchema(Iterator<GenericRecord> oldRecords, Schema newSchema, Map<String, String> renameCols, boolean validate) {
     if (oldRecords == null || newSchema == null) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/bloom/InternalDynamicBloomFilter.java b/hudi-common/src/main/java/org/apache/hudi/common/bloom/InternalDynamicBloomFilter.java
index ee77f36379a..c464967a2a2 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/bloom/InternalDynamicBloomFilter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/bloom/InternalDynamicBloomFilter.java
@@ -27,7 +27,7 @@ import java.io.IOException;
 
 /**
  * Hoodie's internal dynamic Bloom Filter. This is largely based of {@link org.apache.hadoop.util.bloom.DynamicBloomFilter}
- * with bounds on maximum number of entries. Once the max entries is reached, false positive gaurantees are not
+ * with bounds on maximum number of entries. Once the max entries is reached, false positive guarantees are not
  * honored.
  */
 class InternalDynamicBloomFilter extends InternalFilter {
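
The bounded growth mentioned in this javadoc is the defining trade-off of a dynamic Bloom filter: it adds fixed-capacity rows as entries arrive, and once the row budget is spent, further inserts overflow the last row and the false-positive bound lapses. A hedged sketch of that policy (row sizing and hashing here are illustrative, not the real filter's):

    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.List;

    final class DynamicBloomSketch {
      private static final int BITS_PER_ROW = 1 << 16;
      private static final int PROBES = 3;           // hash functions per key, illustrative

      private final List<BitSet> rows = new ArrayList<>();
      private final int entriesPerRow;
      private final int maxRows;
      private int countInCurrentRow;

      DynamicBloomSketch(int entriesPerRow, int maxRows) {
        this.entriesPerRow = entriesPerRow;
        this.maxRows = maxRows;
        rows.add(new BitSet(BITS_PER_ROW));
      }

      void add(String key) {
        if (countInCurrentRow >= entriesPerRow && rows.size() < maxRows) {
          rows.add(new BitSet(BITS_PER_ROW));        // grow: open a fresh row
          countInCurrentRow = 0;
        }
        BitSet row = rows.get(rows.size() - 1);      // past maxRows, this row overfills
        for (int seed = 0; seed < PROBES; seed++) {
          row.set(probe(key, seed));
        }
        countInCurrentRow++;
      }

      boolean mightContain(String key) {
        outer:
        for (BitSet row : rows) {                    // the key may live in any row
          for (int seed = 0; seed < PROBES; seed++) {
            if (!row.get(probe(key, seed))) {
              continue outer;
            }
          }
          return true;
        }
        return false;
      }

      private static int probe(String key, int seed) {
        return Math.floorMod(key.hashCode() * 31 + seed * 0x9E3779B9, BITS_PER_ROW);
      }
    }
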
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/bloom/InternalFilter.java b/hudi-common/src/main/java/org/apache/hudi/common/bloom/InternalFilter.java
index 58d185e3ac0..0737622f5a9 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/bloom/InternalFilter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/bloom/InternalFilter.java
@@ -66,7 +66,7 @@ abstract class InternalFilter implements Writable {
   public abstract void add(Key key);
 
   /**
-   * Determines wether a specified key belongs to <i>this</i> filter.
+   * Determines whether a specified key belongs to <i>this</i> filter.
    *
    * @param key The key to test.
    * @return boolean True if the specified key belongs to <i>this</i> filter. False otherwise.
@@ -74,7 +74,7 @@ abstract class InternalFilter implements Writable {
   public abstract boolean membershipTest(Key key);
 
   /**
-   * Peforms a logical AND between <i>this</i> filter and a specified filter.
+   * Performs a logical AND between <i>this</i> filter and a specified filter.
    * <p>
    * <b>Invariant</b>: The result is assigned to <i>this</i> filter.
    *
@@ -83,7 +83,7 @@ abstract class InternalFilter implements Writable {
   public abstract void and(InternalFilter filter);
 
   /**
-   * Peforms a logical OR between <i>this</i> filter and a specified filter.
+   * Performs a logical OR between <i>this</i> filter and a specified filter.
    * <p>
    * <b>Invariant</b>: The result is assigned to <i>this</i> filter.
    *
@@ -92,7 +92,7 @@ abstract class InternalFilter implements Writable {
   public abstract void or(InternalFilter filter);
 
   /**
-   * Peforms a logical XOR between <i>this</i> filter and a specified filter.
+   * Performs a logical XOR between <i>this</i> filter and a specified filter.
    * <p>
    * <b>Invariant</b>: The result is assigned to <i>this</i> filter.
    *
@@ -163,7 +163,7 @@ abstract class InternalFilter implements Writable {
   @Override
   public void readFields(DataInput in) throws IOException {
     int ver = in.readInt();
-    if (ver > 0) { // old unversioned format
+    if (ver > 0) { // old non-versioned format
       this.nbHash = ver;
       this.hashType = Hash.JENKINS_HASH;
     } else if (ver == VERSION) {
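
The readFields hunk above leans on a sign trick for backward compatibility: the legacy serialization began with the (always positive) hash-function count, while the versioned format begins with a negative version marker, so the sign of the first int identifies the format. A hedged sketch of that detection (field order in the versioned branch is an assumption):

    import java.io.DataInput;
    import java.io.IOException;

    final class FilterHeader {
      static final int VERSION = -1; // negative on purpose: a hash count is always > 0

      int nbHash;
      int hashType;

      void readFields(DataInput in) throws IOException {
        int first = in.readInt();
        if (first > 0) {
          // Old non-versioned format: the first int was the hash count, and the
          // hash type was implicitly Jenkins.
          this.nbHash = first;
          this.hashType = 0;
        } else if (first == VERSION) {
          // Versioned format: explicit fields follow the marker.
          this.hashType = in.readByte();
          this.nbHash = in.readInt();
        } else {
          throw new IOException("Unsupported filter format version: " + first);
        }
      }
    }
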
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetadataConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetadataConfig.java
index 84d0c1023f8..d4fdc9bbc8b 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetadataConfig.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetadataConfig.java
@@ -242,7 +242,7 @@ public final class HoodieMetadataConfig extends HoodieConfig {
       .defaultValue(false)
       .markAdvanced()
       .sinceVersion("0.13.0")
-      .withDocumentation("Optimized log blocks scanner that addresses all the multiwriter use-cases while appending to log files. "
+      .withDocumentation("Optimized log blocks scanner that addresses all the multi-writer use-cases while appending to log files. "
           + "It also differentiates original blocks written by ingestion writers and compacted blocks written by log compaction.");
 
   private HoodieMetadataConfig() {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/fs/FileSystemRetryConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/fs/FileSystemRetryConfig.java
index ca423252bb9..2358cddf370 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/fs/FileSystemRetryConfig.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/fs/FileSystemRetryConfig.java
@@ -69,7 +69,7 @@ public class FileSystemRetryConfig  extends HoodieConfig {
       .defaultValue("")
       .markAdvanced()
       .sinceVersion("0.11.0")
-      .withDocumentation("The class name of the Exception that needs to be re-tryed, separated by commas. "
+      .withDocumentation("The class name of the Exception that needs to be retried, separated by commas. "
           + "Default is empty which means retry all the IOException and RuntimeException from FileSystem");
 
   private FileSystemRetryConfig() {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodiePartitionMetadata.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodiePartitionMetadata.java
index 524ef748366..5dba3d3412d 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodiePartitionMetadata.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodiePartitionMetadata.java
@@ -121,7 +121,7 @@ public class HoodiePartitionMetadata {
         fs.rename(tmpMetaPath, metaPath);
       }
     } catch (IOException ioe) {
-      LOG.warn("Error trying to save partition metadata (this is okay, as long as atleast 1 of these succced), "
+      LOG.warn("Error trying to save partition metadata (this is okay, as long as at least 1 of these succeeded), "
           + partitionPath, ioe);
     } finally {
       if (!metafileExists) {
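
The surrounding code (and the corrected warning) reflect a write-to-temp-then-rename publish: the metadata is written to a temporary path and renamed into place, so readers never see a half-written file, and a failed attempt is tolerable as long as at least one writer succeeds. A hedged sketch of the pattern using java.nio rather than the Hadoop FileSystem API:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    final class AtomicPublish {
      // Write to a sibling temp file, then move it into place in one step.
      static void publish(Path target, byte[] contents) throws IOException {
        Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
        Files.write(tmp, contents);
        Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE);
      }
    }
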
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/RewriteAvroPayload.java b/hudi-common/src/main/java/org/apache/hudi/common/model/RewriteAvroPayload.java
index bfeaf8770ee..2f28ab2abe3 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/model/RewriteAvroPayload.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/model/RewriteAvroPayload.java
@@ -27,7 +27,7 @@ import org.apache.avro.generic.IndexedRecord;
 import java.io.IOException;
 
 /**
- * Default payload used for rewrite use cases where we dont change schema. We dont need to serialize/deserialize avro record in payload.
+ * Default payload used for rewrite use cases where we don't change schema. We dont need to serialize/deserialize avro record in payload.
  */
 public class RewriteAvroPayload implements HoodieRecordPayload<RewriteAvroPayload> {
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/debezium/MySqlDebeziumAvroPayload.java b/hudi-common/src/main/java/org/apache/hudi/common/model/debezium/MySqlDebeziumAvroPayload.java
index 0e0ff54c538..a0a6304fa40 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/model/debezium/MySqlDebeziumAvroPayload.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/model/debezium/MySqlDebeziumAvroPayload.java
@@ -38,7 +38,7 @@ import java.util.Objects;
  * - For inserts, op=i
  * - For deletes, op=d
  * - For updates, op=u
- * - For snapshort inserts, op=r
+ * - For snapshot inserts, op=r
  * <p>
  * This payload implementation will issue matching insert, delete, updates against the hudi table
  */
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/debezium/PostgresDebeziumAvroPayload.java b/hudi-common/src/main/java/org/apache/hudi/common/model/debezium/PostgresDebeziumAvroPayload.java
index ad6b7faf4a6..424f51eb139 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/model/debezium/PostgresDebeziumAvroPayload.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/model/debezium/PostgresDebeziumAvroPayload.java
@@ -42,7 +42,7 @@ import java.util.Properties;
  * - For inserts, op=i
  * - For deletes, op=d
  * - For updates, op=u
- * - For snapshort inserts, op=r
+ * - For snapshot inserts, op=r
  * <p>
  * This payload implementation will issue matching insert, delete, updates against the hudi table
  */
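
Both Debezium payloads document the same op codes. For reference, a hedged sketch of what each code implies for the downstream writer (the dispatch is illustrative; it is not the payloads' actual API):

    final class DebeziumOps {
      static String describe(String op) {
        switch (op) {
          case "i": return "insert";            // row newly created at the source
          case "r": return "insert (snapshot)"; // row read during the initial snapshot
          case "u": return "update";            // row modified in place
          case "d": return "delete";            // row removed; handled as a Hudi delete
          default:  throw new IllegalArgumentException("Unknown Debezium op: " + op);
        }
      }
    }
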
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/log/AbstractHoodieLogRecordReader.java b/hudi-common/src/main/java/org/apache/hudi/common/table/log/AbstractHoodieLogRecordReader.java
index 7af8ee7712a..1c243f199a7 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/log/AbstractHoodieLogRecordReader.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/log/AbstractHoodieLogRecordReader.java
@@ -33,9 +33,9 @@ import org.apache.hudi.common.table.log.block.HoodieDataBlock;
 import org.apache.hudi.common.table.log.block.HoodieDeleteBlock;
 import org.apache.hudi.common.table.log.block.HoodieLogBlock;
 import org.apache.hudi.common.table.timeline.HoodieTimeline;
-import org.apache.hudi.common.util.collection.ClosableIterator;
 import org.apache.hudi.common.util.InternalSchemaCache;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.collection.ClosableIterator;
 import org.apache.hudi.common.util.collection.CloseableMappingIterator;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieException;
@@ -408,7 +408,7 @@ public abstract class AbstractHoodieLogRecordReader {
        * First traversal to identify the rollback blocks and valid data and compacted blocks.
        *
        * Scanning blocks is easy to do in single writer mode, where the rollback block is right after the effected data blocks.
-       * With multiwriter mode the blocks can be out of sync. An example scenario.
+       * With multi-writer mode the blocks can be out of sync. An example scenario.
        * B1, B2, B3, B4, R1(B3), B5
        * In this case, rollback block R1 is invalidating the B3 which is not the previous block.
        * This becomes more complicated if we have compacted blocks, which are data blocks created using log compaction.
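
The scenario in this comment (B1, B2, B3, B4, R1(B3), B5) is exactly why a single forward pass is insufficient under multi-writer: a rollback block may target a non-adjacent earlier block. A hedged two-pass sketch of the idea, using simplified stand-in types rather than Hudi's log block classes:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    final class LogScanSketch {
      static final class Block {
        final String instantTime;
        final String rollbackTarget; // non-null only for rollback blocks

        Block(String instantTime, String rollbackTarget) {
          this.instantTime = instantTime;
          this.rollbackTarget = rollbackTarget;
        }
      }

      static List<Block> validDataBlocks(List<Block> blocks) {
        // Pass 1: a rollback may appear well after its target, so collect every
        // invalidated instant before judging any data block.
        Set<String> rolledBack = new HashSet<>();
        for (Block b : blocks) {
          if (b.rollbackTarget != null) {
            rolledBack.add(b.rollbackTarget);
          }
        }
        // Pass 2: keep only data blocks whose instant was never rolled back.
        List<Block> valid = new ArrayList<>();
        for (Block b : blocks) {
          if (b.rollbackTarget == null && !rolledBack.contains(b.instantTime)) {
            valid.add(b);
          }
        }
        return valid;
      }
    }
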
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieTimeline.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieTimeline.java
index d245be793b7..6b6fb28af86 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieTimeline.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieTimeline.java
@@ -140,7 +140,7 @@ public interface HoodieTimeline extends Serializable {
   /**
    * Filter this timeline to just include the in-flights excluding major and minor compaction instants.
    *
-   * @return New instance of HoodieTimeline with just in-flights excluding majoe and minor compaction instants
+   * @return New instance of HoodieTimeline with just in-flights excluding major and minor compaction instants
    */
   HoodieTimeline filterPendingExcludingMajorAndMinorCompaction();
 
@@ -535,7 +535,7 @@ public interface HoodieTimeline extends Serializable {
     return StringUtils.join(instantTime, HoodieTimeline.REQUESTED_COMPACTION_EXTENSION);
   }
 
-  // Log comaction action
+  // Log compaction action
   static String makeInflightLogCompactionFileName(String instantTime) {
     return StringUtils.join(instantTime, HoodieTimeline.INFLIGHT_LOG_COMPACTION_EXTENSION);
   }
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/MetadataMigrator.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/MetadataMigrator.java
index 23d2a703d26..d61f87e6817 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/MetadataMigrator.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/MetadataMigrator.java
@@ -79,7 +79,7 @@ public class MetadataMigrator<T> {
     if (metadataVersion == targetVersion) {
       return metadata;
     } else if (metadataVersion > targetVersion) {
-      return dowgradeToVersion(metadata, metadataVersion, targetVersion);
+      return downgradeToVersion(metadata, metadataVersion, targetVersion);
     } else {
       return upgradeToVersion(metadata, metadataVersion, targetVersion);
     }
@@ -95,7 +95,7 @@ public class MetadataMigrator<T> {
     return metadata;
   }
 
-  private T dowgradeToVersion(T metadata, int metadataVersion, int targetVersion) {
+  private T downgradeToVersion(T metadata, int metadataVersion, int targetVersion) {
     int newVersion = metadataVersion - 1;
     while (newVersion >= targetVersion) {
       VersionMigrator<T> downgrader = migrators.get(newVersion);
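
Both migration directions here step one version at a time, so each VersionMigrator only needs to understand its adjacent versions. A hedged sketch of that loop shape (the interface and the keying of the migrator map are assumptions):

    import java.util.Map;

    interface StepMigrator<T> {
      T upgrade(T metadata);   // v(N-1) -> vN
      T downgrade(T metadata); // vN -> v(N-1)
    }

    final class Migrations {
      static <T> T migrate(T metadata, int from, int to, Map<Integer, StepMigrator<T>> migrators) {
        while (from < to) {    // upgrade path: one hop per iteration
          metadata = migrators.get(from + 1).upgrade(metadata);
          from++;
        }
        while (from > to) {    // downgrade path: mirror image of the loop above
          metadata = migrators.get(from).downgrade(metadata);
          from--;
        }
        return metadata;
      }
    }
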
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
index fc61dbba543..038c1d569df 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
@@ -163,7 +163,7 @@ public class FileSystemViewStorageConfig extends HoodieConfig {
       .defaultValue("")
       .markAdvanced()
       .sinceVersion("0.12.1")
-      .withDocumentation("The class name of the Exception that needs to be re-tryed, separated by commas. "
+      .withDocumentation("The class name of the Exception that needs to be retried, separated by commas. "
           + "Default is empty which means retry all the IOException and RuntimeException from Remote Request.");
 
   public static final ConfigProperty<String> REMOTE_BACKUP_VIEW_ENABLE = ConfigProperty
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java
index a98b3f695b2..d8d6644ea27 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java
@@ -422,7 +422,7 @@ public abstract class IncrementalTimelineSyncFileSystemView extends AbstractTabl
         .map(FileSlice::getBaseFile).filter(Option::isPresent).map(Option::get)
         .map(df -> Pair.of(Path.getPathWithoutSchemeAndAuthority(new Path(df.getPath())).toString(), df))
         .collect(Collectors.toMap(Pair::getKey, Pair::getValue));
-    // Note: Delta Log Files and Data FIles can be empty when adding/removing pending compactions
+    // Note: Delta Log Files and Data Files can be empty when adding/removing pending compactions
     Map<String, HoodieBaseFile> deltaDataFiles = deltaFileGroups.stream().flatMap(HoodieFileGroup::getAllRawFileSlices)
         .map(FileSlice::getBaseFile).filter(Option::isPresent).map(Option::get)
         .map(df -> Pair.of(Path.getPathWithoutSchemeAndAuthority(new Path(df.getPath())).toString(), df))
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/SyncableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/SyncableFileSystemView.java
index 18ef931ebca..dc6afaa0bec 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/SyncableFileSystemView.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/SyncableFileSystemView.java
@@ -42,7 +42,7 @@ public interface SyncableFileSystemView
 
   /**
    * Read the latest timeline and refresh the file-system view to match the current state of the file-system. The
-   * refresh can either be done incrementally (from reading file-slices in metadata files) or from scratch by reseting
+   * refresh can either be done incrementally (from reading file-slices in metadata files) or from scratch by resetting
    * view storage.
    */
   void sync();
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/RateLimiter.java b/hudi-common/src/main/java/org/apache/hudi/common/util/RateLimiter.java
index aef6158cba0..b60b8ad0525 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/RateLimiter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/RateLimiter.java
@@ -74,7 +74,7 @@ public class RateLimiter {
       while (!semaphore.tryAcquire(numOps)) {
         Thread.sleep(WAIT_BEFORE_NEXT_ACQUIRE_PERMIT_IN_MS);
       }
-      LOG.debug(String.format("acquire permits: %s, maxPremits: %s", numOps, maxPermits));
+      LOG.debug(String.format("acquire permits: %s, maxPermits: %s", numOps, maxPermits));
     } catch (InterruptedException e) {
       throw new RuntimeException("Unable to acquire permits", e);
     }
@@ -88,7 +88,7 @@ public class RateLimiter {
   public void releasePermitsPeriodically() {
     scheduler = Executors.newScheduledThreadPool(SCHEDULER_CORE_THREAD_POOL_SIZE);
     scheduler.scheduleAtFixedRate(() -> {
-      LOG.debug(String.format("Release permits: maxPremits: %s, available: %s", maxPermits,
+      LOG.debug(String.format("Release permits: maxPermits: %s, available: %s", maxPermits,
           semaphore.availablePermits()));
       semaphore.release(maxPermits - semaphore.availablePermits());
     }, RELEASE_PERMITS_PERIOD_IN_SECONDS, RELEASE_PERMITS_PERIOD_IN_SECONDS, timePeriod);
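
The two corrected log lines bracket the RateLimiter's core mechanism: callers spin on Semaphore.tryAcquire until enough permits are free, while a scheduled task periodically tops the semaphore back up to maxPermits. A hedged, self-contained sketch of that pattern (names and the retry backoff are illustrative):

    import java.util.concurrent.Executors;
    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;

    final class SimpleRateLimiter {
      private final Semaphore semaphore;
      private final int maxPermits;

      SimpleRateLimiter(int maxPermits, long period, TimeUnit unit) {
        this.maxPermits = maxPermits;
        this.semaphore = new Semaphore(maxPermits);
        // Refill task: restore the pool to maxPermits once per period.
        Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(
            () -> semaphore.release(maxPermits - semaphore.availablePermits()),
            period, period, unit);
      }

      void acquire(int numOps) throws InterruptedException {
        while (!semaphore.tryAcquire(numOps)) {
          Thread.sleep(10); // brief backoff before retrying, as in the code above
        }
      }
    }
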
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/ReflectionUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/util/ReflectionUtils.java
index e67f43a490d..df6bd3b2a31 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/ReflectionUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/ReflectionUtils.java
@@ -188,7 +188,7 @@ public class ReflectionUtils {
     } catch (NoSuchMethodException e) {
       throw new HoodieException(String.format("Unable to find the method %s of the class %s ",  methodName, clazz), e);
     } catch (InvocationTargetException | IllegalAccessException e) {
-      throw new HoodieException(String.format("Unable to invoke the methond %s of the class %s ",  methodName, clazz), e);
+      throw new HoodieException(String.format("Unable to invoke the method %s of the class %s ", methodName, clazz), e);
     }
   }
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/BitCaskDiskMap.java b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/BitCaskDiskMap.java
index ea2519b6694..56787b0c3d1 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/BitCaskDiskMap.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/BitCaskDiskMap.java
@@ -58,10 +58,10 @@ import java.util.zip.InflaterInputStream;
 import static org.apache.hudi.common.util.BinaryUtil.generateChecksum;
 
 /**
- * This class provides a disk spillable only map implementation. All of the data is currenly written to one file,
+ * This class provides a disk spillable only map implementation. All of the data is currently written to one file,
  * without any rollover support. It uses the following : 1) An in-memory map that tracks the key-> latest ValueMetadata.
  * 2) Current position in the file NOTE : Only String.class type supported for Key
- *
+ * <p>
  * Inspired by https://github.com/basho/bitcask
  */
 public final class BitCaskDiskMap<T extends Serializable, R extends Serializable> extends DiskMap<T, R> {
@@ -110,8 +110,8 @@ public final class BitCaskDiskMap<T extends Serializable, R extends Serializable
   }
 
   /**
-   * RandomAcessFile is not thread-safe. This API opens a new file handle per thread and returns.
-   * 
+   * RandomAccessFile is not thread-safe. This API opens a new file handle per thread and returns.
+   *
    * @return
    */
   private BufferedRandomAccessFile getRandomAccessFile() {
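
The class comment packs the Bitcask design into a sentence, so it may help to spell it out: every value is appended to a single log file, and the in-memory map keeps only each key's (offset, length), making a read one seek plus one sequential read. A minimal, hedged illustration; the real class adds checksums, optional compression, and a file handle per thread since RandomAccessFile is not thread-safe:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.util.HashMap;
    import java.util.Map;

    final class TinyBitCask {
      private final RandomAccessFile file;
      private final Map<String, long[]> index = new HashMap<>(); // key -> {offset, length}

      TinyBitCask(File path) throws IOException {
        this.file = new RandomAccessFile(path, "rw");
      }

      synchronized void put(String key, byte[] value) throws IOException {
        long offset = file.length();
        file.seek(offset);          // always append; no rollover, as noted above
        file.write(value);
        index.put(key, new long[] {offset, value.length});
      }

      synchronized byte[] get(String key) throws IOException {
        long[] meta = index.get(key);
        if (meta == null) {
          return null;
        }
        byte[] buf = new byte[(int) meta[1]];
        file.seek(meta[0]);
        file.readFully(buf);        // one seek + one read per lookup
        return buf;
      }
    }
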
diff --git a/hudi-common/src/main/java/org/apache/hudi/exception/HoodieHeartbeatException.java b/hudi-common/src/main/java/org/apache/hudi/exception/HoodieHeartbeatException.java
index eef6baf40e2..b2fa0a0edc7 100644
--- a/hudi-common/src/main/java/org/apache/hudi/exception/HoodieHeartbeatException.java
+++ b/hudi-common/src/main/java/org/apache/hudi/exception/HoodieHeartbeatException.java
@@ -22,7 +22,7 @@ import java.io.Serializable;
 
 /**
  * <p>
- * Exception thrown for Hoodie hearbeat failures. The root of the exception hierarchy.
+ * Exception thrown for Hoodie heartbeat failures. The root of the exception hierarchy.
  * </p>
  * <p>
  * Hoodie Write/Read clients will throw this exception if any of its operations fail. This is a runtime (unchecked)
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
index ff54116a90c..280ddd9013f 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
@@ -71,7 +71,7 @@ public class InternalSchemaBuilder implements Serializable {
    * Use to traverse all types in internalSchema with visitor.
    *
    * @param schema hoodie internal schema
-   * @return vistor expected result.
+   * @return visitor expected result.
    */
   public <T> T visit(InternalSchema schema, InternalSchemaVisitor<T> visitor) {
     return visitor.schema(schema, visit(schema.getRecord(), visitor));
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
index dc48c8c16ba..35b3c781b4e 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
@@ -163,9 +163,9 @@ public interface TableChange {
     protected abstract Integer findIdByFullName(String fullName);
 
     // Modify hudi meta columns is prohibited
-    protected void checkColModifyIsLegal(String colNeedToModfiy) {
-      if (HoodieRecord.HOODIE_META_COLUMNS.stream().anyMatch(f -> f.equalsIgnoreCase(colNeedToModfiy))) {
-        throw new IllegalArgumentException(String.format("cannot modify hudi meta col: %s", colNeedToModfiy));
+    protected void checkColModifyIsLegal(String colNeedToModify) {
+      if (HoodieRecord.HOODIE_META_COLUMNS.stream().anyMatch(f -> f.equalsIgnoreCase(colNeedToModify))) {
+        throw new IllegalArgumentException(String.format("cannot modify hudi meta col: %s", colNeedToModify));
       }
     }
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChanges.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChanges.java
index 6056d51d2e8..d039e2e2ed5 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChanges.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChanges.java
@@ -291,7 +291,9 @@ public class TableChanges {
     }
   }
 
-  /** Deal with add columns changes for table. */
+  /**
+   * Deal with add columns changes for table.
+   */
   public static class ColumnAddChange extends TableChange.BaseColumnChange {
     private final Map<String, Integer> fullColName2Id = new HashMap<>();
     private final Map<Integer, ArrayList<Types.Field>> parentId2AddCols = new HashMap<>();
@@ -301,8 +303,8 @@ public class TableChanges {
       return new ColumnAddChange(internalSchema);
     }
 
-    public Type applyAdd(Types.Field orignalField, Type type) {
-      int fieldId = orignalField.fieldId();
+    public Type applyAdd(Types.Field originalField, Type type) {
+      int fieldId = originalField.fieldId();
       ArrayList<Types.Field> addFields = parentId2AddCols.getOrDefault(fieldId, new ArrayList<>());
       ArrayList<ColumnPositionChange> pchanges = positionChangeMap.getOrDefault(fieldId, new ArrayList<>());
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/io/AbstractInternalSchemaStorageManager.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/io/AbstractInternalSchemaStorageManager.java
index d4db68425fd..417ffacc24f 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/io/AbstractInternalSchemaStorageManager.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/io/AbstractInternalSchemaStorageManager.java
@@ -36,7 +36,7 @@ abstract class AbstractInternalSchemaStorageManager {
 
   /**
    * Get latest history schema string.
-   * Using give validCommits to validate all legal histroy Schema files, and return the latest one.
+   * Using give validCommits to validate all legal history Schema files, and return the latest one.
    * If the passed valid commits is null or empty, valid instants will be fetched from the file-system and used.
    */
   public abstract String getHistorySchemaStrByGivenValidCommits(List<String> validCommits);
diff --git a/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorOptions.java b/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorOptions.java
index e618735840c..2b2886cf4d1 100644
--- a/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorOptions.java
+++ b/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorOptions.java
@@ -58,7 +58,7 @@ public class KeyGeneratorOptions extends HoodieConfig {
       .key("hoodie.datasource.write.partitionpath.field")
       .noDefaultValue()
       .withDocumentation("Partition path field. Value to be used at the partitionPath component of HoodieKey. "
-          + "Actual value ontained by invoking .toString()");
+          + "Actual value obtained by invoking .toString()");
 
   public static final ConfigProperty<String> KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED = ConfigProperty
       .key("hoodie.datasource.write.keygenerator.consistent.logical.timestamp.enabled")
diff --git a/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorType.java b/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorType.java
index 4babda59249..0de0e716e26 100644
--- a/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorType.java
+++ b/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorType.java
@@ -52,7 +52,7 @@ public enum KeyGeneratorType {
   CUSTOM,
 
   /**
-   * Simple Key generator for unpartitioned Hive Tables.
+   * Simple Key generator for non-partitioned Hive Tables.
    */
   NON_PARTITION,
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java b/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java
index 98cbcba3117..9db5fc39ef4 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java
@@ -35,7 +35,7 @@ public enum MetadataPartitionType {
   private final String fileIdPrefix;
   // Total file groups
   // TODO fix: enum should not have any mutable aspect as this compromises whole idea
-  //      of the inum being static, immutable entity
+  //      of the enum being static, immutable entity
   private int fileGroupCount = 1;
 
   MetadataPartitionType(final String partitionPath, final String fileIdPrefix) {
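
The TODO above flags a real design smell: enum constants are global singletons, so a mutable field on them is shared mutable state. A hedged sketch of the usual fix, keeping the enum immutable and carrying the per-partition count alongside it (names are illustrative):

    import java.util.EnumMap;
    import java.util.Map;

    enum PartitionTypeSketch { FILES, COLUMN_STATS, BLOOM_FILTERS }

    final class FileGroupCounts {
      private final Map<PartitionTypeSketch, Integer> counts = new EnumMap<>(PartitionTypeSketch.class);

      int get(PartitionTypeSketch type) {
        return counts.getOrDefault(type, 1); // same default as the mutable field above
      }

      void set(PartitionTypeSketch type, int count) {
        counts.put(type, count);
      }
    }
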
diff --git a/hudi-common/src/main/java/org/apache/hudi/secondary/index/SecondaryIndexUtils.java b/hudi-common/src/main/java/org/apache/hudi/secondary/index/SecondaryIndexUtils.java
index d134a457caf..e5f4d41e9f4 100644
--- a/hudi-common/src/main/java/org/apache/hudi/secondary/index/SecondaryIndexUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/secondary/index/SecondaryIndexUtils.java
@@ -50,7 +50,7 @@ public class SecondaryIndexUtils {
   }
 
   /**
-   * Parse secondary index str to List<HOodieSecondaryIndex>
+   * Parse secondary index str to List<HoodieSecondaryIndex>
    *
    * @param jsonStr Secondary indexes with json format
    * @return List<HoodieSecondaryIndex>
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/fs/TestFSUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/fs/TestFSUtils.java
index 7107452e289..250304c7fd0 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/fs/TestFSUtils.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/fs/TestFSUtils.java
@@ -221,7 +221,7 @@ public class TestFSUtils extends HoodieCommonTestHarness {
 
   @Test
   public void testOldLogFileName() {
-    // Check if old log file names are still parseable by FSUtils method
+    // Check if old log file names are still parsable by FSUtils method
     String partitionPath = "2019/01/01/";
     String fileName = UUID.randomUUID().toString();
     String oldLogFile = makeOldLogFileName(fileName, ".log", "100", 1);
@@ -238,7 +238,7 @@ public class TestFSUtils extends HoodieCommonTestHarness {
 
   @Test
   public void tesLogFileName() {
-    // Check if log file names are parseable by FSUtils method
+    // Check if log file names are parsable by FSUtils method
     String partitionPath = "2019/01/01/";
     String fileName = UUID.randomUUID().toString();
     String logFile = FSUtils.makeLogFileName(fileName, ".log", "100", 2, "1-0-1");
@@ -363,14 +363,14 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     String fileId = "Id123";
     int version = 1;
     final String LOG_STR = "log";
-    final String LOG_EXTENTION = "." + LOG_STR;
+    final String LOG_EXTENSION = "." + LOG_STR;
 
     // data file name
     String dataFileName = FSUtils.makeBaseFileName(instantTime, writeToken, fileId);
     assertEquals(instantTime, FSUtils.getCommitTime(dataFileName));
     assertEquals(fileId, FSUtils.getFileId(dataFileName));
 
-    String logFileName = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, version, writeToken);
+    String logFileName = FSUtils.makeLogFileName(fileId, LOG_EXTENSION, instantTime, version, writeToken);
     assertTrue(FSUtils.isLogFile(new Path(logFileName)));
     assertEquals(instantTime, FSUtils.getBaseCommitTimeFromLogPath(new Path(logFileName)));
     assertEquals(fileId, FSUtils.getFileIdFromLogPath(new Path(logFileName)));
@@ -380,17 +380,17 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     // create three versions of log file
     java.nio.file.Path partitionPath = Paths.get(basePath, partitionStr);
     Files.createDirectories(partitionPath);
-    String log1 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 1, writeToken);
+    String log1 = FSUtils.makeLogFileName(fileId, LOG_EXTENSION, instantTime, 1, writeToken);
     Files.createFile(partitionPath.resolve(log1));
-    String log2 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 2, writeToken);
+    String log2 = FSUtils.makeLogFileName(fileId, LOG_EXTENSION, instantTime, 2, writeToken);
     Files.createFile(partitionPath.resolve(log2));
-    String log3 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 3, writeToken);
+    String log3 = FSUtils.makeLogFileName(fileId, LOG_EXTENSION, instantTime, 3, writeToken);
     Files.createFile(partitionPath.resolve(log3));
 
     assertEquals(3, (int) FSUtils.getLatestLogVersion(FSUtils.getFs(basePath, new Configuration()),
-        new Path(partitionPath.toString()), fileId, LOG_EXTENTION, instantTime).get().getLeft());
+        new Path(partitionPath.toString()), fileId, LOG_EXTENSION, instantTime).get().getLeft());
     assertEquals(4, FSUtils.computeNextLogVersion(FSUtils.getFs(basePath, new Configuration()),
-        new Path(partitionPath.toString()), fileId, LOG_EXTENTION, instantTime));
+        new Path(partitionPath.toString()), fileId, LOG_EXTENSION, instantTime));
   }
 
   @Test
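
From the assertions in these tests, the log file naming scheme is roughly .<fileId>_<baseInstantTime><extension>.<version>_<writeToken>, with the leading dot making the file hidden. A hedged reconstruction (FSUtils.makeLogFileName is the authoritative implementation):

    final class LogFileNames {
      static String makeLogFileName(String fileId, String ext, String instant, int version, String writeToken) {
        return "." + fileId + "_" + instant + ext + "." + version + "_" + writeToken;
      }
      // e.g. makeLogFileName("Id123", ".log", "100", 1, "1-0-1") -> ".Id123_100.log.1_1-0-1"
    }
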
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInLineFileSystem.java b/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInLineFileSystem.java
index 88bd35ef4b5..5e80b9ca966 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInLineFileSystem.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInLineFileSystem.java
@@ -242,7 +242,7 @@ public class TestInLineFileSystem {
   @Test
   public void testOpen() throws IOException {
     Path inlinePath = getRandomInlinePath();
-    // open non existant path
+    // open non-existent path
     assertThrows(FileNotFoundException.class, () -> {
       inlinePath.getFileSystem(conf).open(inlinePath);
     }, "Should have thrown exception");
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/functional/TestHoodieLogFormat.java b/hudi-common/src/test/java/org/apache/hudi/common/functional/TestHoodieLogFormat.java
index 3db81187df7..539037c4ccf 100755
--- a/hudi-common/src/test/java/org/apache/hudi/common/functional/TestHoodieLogFormat.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/functional/TestHoodieLogFormat.java
@@ -22,8 +22,8 @@ import org.apache.hudi.avro.HoodieAvroUtils;
 import org.apache.hudi.common.fs.FSUtils;
 import org.apache.hudi.common.model.DeleteRecord;
 import org.apache.hudi.common.model.HoodieArchivedLogFile;
-import org.apache.hudi.common.model.HoodieAvroRecord;
 import org.apache.hudi.common.model.HoodieAvroIndexedRecord;
+import org.apache.hudi.common.model.HoodieAvroRecord;
 import org.apache.hudi.common.model.HoodieAvroRecordMerger;
 import org.apache.hudi.common.model.HoodieLogFile;
 import org.apache.hudi.common.model.HoodieRecord;
@@ -54,9 +54,9 @@ import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hudi.common.testutils.HoodieTestUtils;
 import org.apache.hudi.common.testutils.SchemaTestUtil;
 import org.apache.hudi.common.testutils.minicluster.HdfsTestService;
-import org.apache.hudi.common.util.collection.ClosableIterator;
 import org.apache.hudi.common.util.HoodieRecordUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.collection.ClosableIterator;
 import org.apache.hudi.common.util.collection.ExternalSpillableMap;
 import org.apache.hudi.exception.CorruptedLogFileException;
 import org.apache.hudi.exception.HoodieIOException;
@@ -543,13 +543,13 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
         .withFs(fs)
         .build();
 
-    String dataSchameString = "{\"type\":\"record\",\"name\":\"Record\","
+    String dataSchemaString = "{\"type\":\"record\",\"name\":\"Record\","
         + "\"fields\":["
         + "{\"name\":\"uuid\",\"type\":[\"int\",\"null\"]},"
         + "{\"name\":\"name\",\"type\":[\"string\",\"null\"]},"
         + "{\"name\":\"ts\",\"type\":[\"long\",\"null\"]}"
         + "]}";
-    Schema dataSchema = new Schema.Parser().parse(dataSchameString);
+    Schema dataSchema = new Schema.Parser().parse(dataSchemaString);
     Schema cdcSchema = HoodieCDCUtils.schemaBySupplementalLoggingMode(
         HoodieCDCSupplementalLoggingMode.data_before_after, dataSchema);
     GenericRecord insertedRecord = new GenericData.Record(dataSchema);
@@ -850,7 +850,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
   public void testAppendAndReadOnCorruptedLog() throws IOException, URISyntaxException, InterruptedException {
     HoodieLogFile logFile = addValidBlock("test-fileId1", "100", 100);
 
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     FSDataOutputStream outputStream = fs.append(logFile.getPath());
     // create a block with
@@ -1000,7 +1000,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     writer.appendBlock(dataBlock);
     writer.close();
 
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     FSDataOutputStream outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
@@ -1225,7 +1225,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
 
     // Write 2
     header.put(HoodieLogBlock.HeaderMetadataType.INSTANT_TIME, "101");
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     FSDataOutputStream outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
@@ -1877,7 +1877,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     writer.close();
     FileCreateUtils.createDeltaCommit(basePath, "100", fs);
 
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     FSDataOutputStream outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
@@ -1890,7 +1890,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     outputStream.flush();
     outputStream.close();
 
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
@@ -1910,7 +1910,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     writer.appendBlock(dataBlock);
     writer.close();
 
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
@@ -2020,7 +2020,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     writer.close();
     FileCreateUtils.createDeltaCommit(basePath, "102", fs);
 
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     FSDataOutputStream outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
@@ -2033,7 +2033,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     outputStream.flush();
     outputStream.close();
 
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
@@ -2372,7 +2372,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
 
     FileCreateUtils.createDeltaCommit(basePath, "100", fs);
 
-    // Append some arbit byte[] to the end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     FSDataOutputStream outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
@@ -2693,7 +2693,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     writer.appendBlock(dataBlock);
     writer.close();
 
-    // Append some arbit byte[] to thee end of the log (mimics a partially written commit)
+    // Append some arbitrary byte[] to the end of the log (mimics a partially written commit)
     fs = FSUtils.getFs(fs.getUri().toString(), fs.getConf());
     FSDataOutputStream outputStream = fs.append(writer.getLogFile().getPath());
     // create a block with
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
index 085cc0b3fc9..d908c1b0949 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
@@ -866,7 +866,7 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
     assertEquals(11, statuses.length);
     refreshFsView();
 
-    // Check files as of lastest commit.
+    // Check files as of latest commit.
     List<FileSlice> allSlices = rtView.getAllFileSlices("2016/05/01").collect(Collectors.toList());
     assertEquals(isLatestFileSliceOnly ? 4 : 8, allSlices.size());
     Map<String, Long> fileSliceMap =
@@ -1434,7 +1434,7 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
     HoodieInstant instant2 = new HoodieInstant(true, HoodieTimeline.REPLACE_COMMIT_ACTION, commitTime2);
     saveAsComplete(commitTimeline, instant2, Option.of(commitMetadata.toJsonString().getBytes(StandardCharsets.UTF_8)));
 
-    //make sure view doesnt include fileId1
+    //make sure view doesn't include fileId1
     refreshFsView();
     assertEquals(0, roView.getLatestBaseFiles(partitionPath1)
         .filter(dfile -> dfile.getFileId().equals(fileId1)).count());
@@ -1590,7 +1590,7 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
         .setClusteringPlan(plan).setOperationType(WriteOperationType.CLUSTER.name()).build();
     metaClient.getActiveTimeline().saveToPendingReplaceCommit(instant2, TimelineMetadataUtils.serializeRequestedReplaceMetadata(requestedReplaceMetadata));
 
-    //make sure view doesnt include fileId1
+    //make sure view doesn't include fileId1
     refreshFsView();
     Set<String> fileIds =
         fsView.getFileGroupsInPendingClustering().map(e -> e.getLeft().getFileId()).collect(Collectors.toSet());
@@ -1942,7 +1942,7 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
   }
 
   /**
-   * Used to verify fils system view on various file systems.
+   * Used to verify file system view on various file systems.
    */
   protected void verifyFileSystemView(String partitionPath, FileSystemViewExpectedState expectedState,
                                       SyncableFileSystemView tableFileSystemView) {
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
index 51ced0b4981..560f209ea03 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
@@ -620,10 +620,10 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
    */
   private void performClean(String instant, List<String> files, String cleanInstant)
       throws IOException {
-    Map<String, List<String>> partititonToFiles = deleteFiles(files);
-    List<HoodieCleanStat> cleanStats = partititonToFiles.entrySet().stream().map(e ->
+    Map<String, List<String>> partitionToFiles = deleteFiles(files);
+    List<HoodieCleanStat> cleanStats = partitionToFiles.entrySet().stream().map(e ->
         new HoodieCleanStat(HoodieCleaningPolicy.KEEP_LATEST_COMMITS, e.getKey(), e.getValue(), e.getValue(),
-        new ArrayList<>(), Integer.toString(Integer.parseInt(instant) + 1), "")).collect(Collectors.toList());
+            new ArrayList<>(), Integer.toString(Integer.parseInt(instant) + 1), "")).collect(Collectors.toList());
 
     HoodieInstant cleanInflightInstant = new HoodieInstant(true, HoodieTimeline.CLEAN_ACTION, cleanInstant);
     metaClient.getActiveTimeline().createNewInstant(cleanInflightInstant);
@@ -641,8 +641,8 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
    */
   private void performRestore(HoodieInstant instant, List<String> files, String rollbackInstant,
       boolean isRestore) throws IOException {
-    Map<String, List<String>> partititonToFiles = deleteFiles(files);
-    List<HoodieRollbackStat> rollbackStats = partititonToFiles.entrySet().stream().map(e ->
+    Map<String, List<String>> partitionToFiles = deleteFiles(files);
+    List<HoodieRollbackStat> rollbackStats = partitionToFiles.entrySet().stream().map(e ->
         new HoodieRollbackStat(e.getKey(), e.getValue(), new ArrayList<>(), new HashMap<>())
     ).collect(Collectors.toList());
 
@@ -677,16 +677,16 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
    * @param files List of files to be deleted
    */
   private Map<String, List<String>> deleteFiles(List<String> files) throws IOException {
-    Map<String, List<String>> partititonToFiles = new HashMap<>();
-    PARTITIONS.forEach(p -> partititonToFiles.put(p, new ArrayList<>()));
+    Map<String, List<String>> partitionToFiles = new HashMap<>();
+    PARTITIONS.forEach(p -> partitionToFiles.put(p, new ArrayList<>()));
 
     for (String f : files) {
       java.nio.file.Path fullPath = Paths.get(metaClient.getBasePathV2().toString(), f);
       Files.delete(fullPath);
       String partition = PARTITIONS.stream().filter(f::startsWith).findAny().get();
-      partititonToFiles.get(partition).add(fullPath.toUri().toString());
+      partitionToFiles.get(partition).add(fullPath.toUri().toString());
     }
-    return partititonToFiles;
+    return partitionToFiles;
   }
 
   /**
@@ -795,9 +795,9 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
   /**
    * Perform one or more rounds of ingestion/compaction and validate incremental timeline syncing.
    *
-   * @param view Hoodie View
-   * @param instants Ingestion/Commit INstants
-   * @param deltaCommit Delta COmmit ?
+   * @param view                      Hoodie View
+   * @param instants                  Ingestion/Commit Instants
+   * @param deltaCommit               Delta Commit
    * @param baseInstantForDeltaCommit Base Instant to be used in case of delta-commit
    * @return List of new file created
    */
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/testutils/FileSystemTestUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/testutils/FileSystemTestUtils.java
index 9178742b0ae..82de0f3317f 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/testutils/FileSystemTestUtils.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/testutils/FileSystemTestUtils.java
@@ -59,7 +59,7 @@ public class FileSystemTestUtils {
   }
 
   public static Path getPhantomFile(Path outerPath, long startOffset, long inlineLength) {
-    // Generate phathom inline file
+    // Generate phantom inline file
     return InLineFSUtils.getInlineFilePath(outerPath, FILE_SCHEME, startOffset, inlineLength);
   }
 
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java
index f6e871b561c..7e70da23e09 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java
@@ -62,7 +62,7 @@ public class HoodieCommonTestHarness {
   }
 
   /**
-   * Initializes a test data generator which used to generate test datas.
+   * Initializes a test data generator which used to generate test data.
    */
   protected void initTestDataGenerator() {
     dataGen = new HoodieTestDataGenerator();
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestBinaryUtil.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestBinaryUtil.java
index 21123eea24a..488a824f1c3 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestBinaryUtil.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestBinaryUtil.java
@@ -36,10 +36,10 @@ public class TestBinaryUtil {
   public void testIntConvert() {
     // test Int
     int[] testInt = new int[] {-1, 1, -2, 10000, -100000, 2, Integer.MAX_VALUE, Integer.MIN_VALUE};
-    List<OrginValueWrapper<Integer>> valueWrappers = new ArrayList<>();
+    List<OriginValueWrapper<Integer>> valueWrappers = new ArrayList<>();
     List<ConvertResultWrapper<Integer>> convertResultWrappers = new ArrayList<>();
     for (int i = 0; i < testInt.length; i++) {
-      valueWrappers.add(new OrginValueWrapper<>(i, testInt[i]));
+      valueWrappers.add(new OriginValueWrapper<>(i, testInt[i]));
       convertResultWrappers.add(new ConvertResultWrapper<>(i, BinaryUtil.intTo8Byte(testInt[i])));
     }
 
@@ -56,11 +56,11 @@ public class TestBinaryUtil {
   public void testLongConvert() {
     // test Long
     long[] testLong = new long[] {-1L, 1L, -2L, 10000L, -100000L, 2L, Long.MAX_VALUE, Long.MIN_VALUE};
-    List<OrginValueWrapper<Long>> valueWrappers = new ArrayList<>();
+    List<OriginValueWrapper<Long>> valueWrappers = new ArrayList<>();
     List<ConvertResultWrapper<Long>> convertResultWrappers = new ArrayList<>();
     for (int i = 0; i < testLong.length; i++) {
-      valueWrappers.add(new OrginValueWrapper<>((long)i, testLong[i]));
-      convertResultWrappers.add(new ConvertResultWrapper<>((long)i, BinaryUtil.longTo8Byte(testLong[i])));
+      valueWrappers.add(new OriginValueWrapper<>((long) i, testLong[i]));
+      convertResultWrappers.add(new ConvertResultWrapper<>((long) i, BinaryUtil.longTo8Byte(testLong[i])));
     }
 
     Collections.sort(valueWrappers, ((o1, o2) -> o1.originValue.compareTo(o2.originValue)));
@@ -76,11 +76,11 @@ public class TestBinaryUtil {
   public void testDoubleConvert() {
     // test Long
     double[] testDouble = new double[] {-1.00d, 1.05d, -2.3d, 10000.002d, -100000.7d, 2.9d, Double.MAX_VALUE};
-    List<OrginValueWrapper<Double>> valueWrappers = new ArrayList<>();
+    List<OriginValueWrapper<Double>> valueWrappers = new ArrayList<>();
     List<ConvertResultWrapper<Double>> convertResultWrappers = new ArrayList<>();
     for (int i = 0; i < testDouble.length; i++) {
-      valueWrappers.add(new OrginValueWrapper<>((Double)(i * 1.0), testDouble[i]));
-      convertResultWrappers.add(new ConvertResultWrapper<>((Double)(i * 1.0), BinaryUtil.doubleTo8Byte(testDouble[i])));
+      valueWrappers.add(new OriginValueWrapper<>((Double) (i * 1.0), testDouble[i]));
+      convertResultWrappers.add(new ConvertResultWrapper<>((Double) (i * 1.0), BinaryUtil.doubleTo8Byte(testDouble[i])));
     }
 
     Collections.sort(valueWrappers, ((o1, o2) -> o1.originValue.compareTo(o2.originValue)));
@@ -96,11 +96,11 @@ public class TestBinaryUtil {
   public void testFloatConvert() {
     // test Long
     float[] testDouble = new float[] {-1.00f, 1.05f, -2.3f, 10000.002f, -100000.7f, 2.9f, Float.MAX_VALUE, Float.MIN_VALUE};
-    List<OrginValueWrapper<Float>> valueWrappers = new ArrayList<>();
+    List<OriginValueWrapper<Float>> valueWrappers = new ArrayList<>();
     List<ConvertResultWrapper<Float>> convertResultWrappers = new ArrayList<>();
     for (int i = 0; i < testDouble.length; i++) {
-      valueWrappers.add(new OrginValueWrapper<>((float)(i * 1.0), testDouble[i]));
-      convertResultWrappers.add(new ConvertResultWrapper<>((float)(i * 1.0), BinaryUtil.doubleTo8Byte((double) testDouble[i])));
+      valueWrappers.add(new OriginValueWrapper<>((float) (i * 1.0), testDouble[i]));
+      convertResultWrappers.add(new ConvertResultWrapper<>((float) (i * 1.0), BinaryUtil.doubleTo8Byte((double) testDouble[i])));
     }
 
     Collections.sort(valueWrappers, ((o1, o2) -> o1.originValue.compareTo(o2.originValue)));
@@ -122,11 +122,11 @@ public class TestBinaryUtil {
     }
   }
 
-  private class OrginValueWrapper<T> {
+  private class OriginValueWrapper<T> {
     T index;
     T originValue;
 
-    public OrginValueWrapper(T index, T originValue) {
+    public OriginValueWrapper(T index, T originValue) {
       this.index = index;
       this.originValue = originValue;
     }
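
Each of these tests sorts both the original numbers and their 8-byte encodings and checks the two orders agree. The property under test is an order-preserving binary encoding: flipping the sign bit maps signed values onto an unsigned range, so lexicographic byte comparison matches numeric order. A hedged sketch of the long case (BinaryUtil's exact byte layout may differ, and floating-point values need extra handling for negatives):

    import java.nio.ByteBuffer;

    final class OrderPreserving {
      // XOR with Long.MIN_VALUE flips the sign bit, mapping
      // Long.MIN_VALUE..Long.MAX_VALUE onto 0..2^64-1 in order.
      static byte[] longTo8Byte(long v) {
        return ByteBuffer.allocate(8).putLong(v ^ Long.MIN_VALUE).array();
      }

      static int compareUnsigned(byte[] a, byte[] b) {
        for (int i = 0; i < 8; i++) {
          int cmp = Integer.compare(a[i] & 0xff, b[i] & 0xff);
          if (cmp != 0) {
            return cmp;
          }
        }
        return 0;
      }
    }
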
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestClusteringUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestClusteringUtils.java
index 5235183d10f..052f2923ad3 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestClusteringUtils.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestClusteringUtils.java
@@ -104,7 +104,7 @@ public class TestClusteringUtils extends HoodieCommonTestHarness {
     validateClusteringInstant(fileIds3, partitionPath1, clusterTime, fileGroupToInstantMap);
   }
 
-  // replacecommit.inflight doesnt have clustering plan. 
+  // replacecommit.inflight doesn't have clustering plan.
   // Verify that getClusteringPlan fetches content from corresponding requested file.
   @Test
   public void testClusteringPlanInflight() throws Exception {