Posted to commits@spark.apache.org by sr...@apache.org on 2020/12/07 14:43:28 UTC

[spark] branch master updated: [MINOR] Spelling sql not core

This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new c62b84a  [MINOR] Spelling sql not core
c62b84a is described below

commit c62b84a0432e51fd10e628088ee311dc3be73d2f
Author: Josh Soref <js...@users.noreply.github.com>
AuthorDate: Mon Dec 7 08:40:29 2020 -0600

    [MINOR] Spelling sql not core
    
    ### What changes were proposed in this pull request?
    
    This PR intends to fix typos in the sub-modules:
    * `sql/catalyst`
    * `sql/hive-thriftserver`
    * `sql/hive`
    
    Split per srowen https://github.com/apache/spark/pull/30323#issuecomment-728981618
    
    NOTE: The misspellings have been reported at https://github.com/jsoref/spark/commit/706a726f87a0bbf5e31467fae9015218773db85b#commitcomment-44064356
    
    ### Why are the changes needed?
    
    Misspelled words make content harder to read and understand.
    
    ### Does this PR introduce _any_ user-facing change?
    
    There are various fixes to documentation, etc.
    
    ### How was this patch tested?
    
    No testing was performed.
    
    Closes #30532 from jsoref/spelling-sql-not-core.
    
    Authored-by: Josh Soref <js...@users.noreply.github.com>
    Signed-off-by: Sean Owen <sr...@gmail.com>
---
 sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala   |  6 +++---
 .../scala/org/apache/spark/sql/catalyst/StructFilters.scala  |  2 +-
 .../org/apache/spark/sql/catalyst/analysis/Analyzer.scala    |  2 +-
 .../apache/spark/sql/catalyst/analysis/CheckAnalysis.scala   |  2 +-
 .../spark/sql/catalyst/analysis/StreamingJoinHelper.scala    |  4 ++--
 .../sql/catalyst/analysis/UpdateAttributeNullability.scala   |  2 +-
 .../apache/spark/sql/catalyst/catalog/SessionCatalog.scala   |  2 +-
 .../org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala   |  2 +-
 .../spark/sql/catalyst/encoders/ExpressionEncoder.scala      |  2 +-
 .../apache/spark/sql/catalyst/expressions/AliasHelper.scala  |  2 +-
 .../org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala |  4 ++--
 .../sql/catalyst/expressions/aggregate/Percentile.scala      |  6 +++---
 .../apache/spark/sql/catalyst/expressions/arithmetic.scala   |  2 +-
 .../sql/catalyst/expressions/codegen/CodeGenerator.scala     |  2 +-
 .../expressions/codegen/GenerateSafeProjection.scala         |  2 +-
 .../org/apache/spark/sql/catalyst/expressions/hash.scala     |  4 ++--
 .../sql/catalyst/expressions/higherOrderFunctions.scala      |  2 +-
 .../spark/sql/catalyst/expressions/jsonExpressions.scala     | 12 ++++++------
 .../spark/sql/catalyst/expressions/regexpExpressions.scala   |  2 +-
 .../spark/sql/catalyst/expressions/windowExpressions.scala   |  2 +-
 .../spark/sql/catalyst/optimizer/NestedColumnAliasing.scala  |  2 +-
 .../org/apache/spark/sql/catalyst/optimizer/Optimizer.scala  |  4 ++--
 .../sql/catalyst/optimizer/PushDownLeftSemiAntiJoin.scala    |  2 +-
 .../apache/spark/sql/catalyst/optimizer/expressions.scala    |  2 +-
 .../org/apache/spark/sql/catalyst/optimizer/subquery.scala   |  6 +++---
 .../org/apache/spark/sql/catalyst/parser/ParserUtils.scala   |  2 +-
 .../org/apache/spark/sql/catalyst/plans/QueryPlan.scala      |  2 +-
 .../spark/sql/catalyst/plans/logical/LogicalPlan.scala       |  2 +-
 .../apache/spark/sql/catalyst/plans/logical/PlanHelper.scala |  2 +-
 .../sql/catalyst/plans/logical/basicLogicalOperators.scala   |  2 +-
 .../spark/sql/catalyst/plans/physical/partitioning.scala     |  2 +-
 .../spark/sql/catalyst/util/DateTimeFormatterHelper.scala    |  4 ++--
 .../org/apache/spark/sql/catalyst/util/DateTimeUtils.scala   |  2 +-
 .../apache/spark/sql/catalyst/util/QuantileSummaries.scala   |  6 +++---
 .../main/scala/org/apache/spark/sql/internal/SQLConf.scala   |  6 +++---
 .../scala/org/apache/spark/sql/RandomDataGenerator.scala     |  6 +++---
 .../apache/spark/sql/catalyst/analysis/AnalysisSuite.scala   |  2 +-
 .../catalyst/analysis/ResolveGroupingAnalyticsSuite.scala    |  4 ++--
 .../spark/sql/catalyst/analysis/TypeCoercionSuite.scala      |  2 +-
 .../sql/catalyst/analysis/UnsupportedOperationsSuite.scala   |  2 +-
 .../spark/sql/catalyst/expressions/CodeGenerationSuite.scala |  4 ++--
 .../spark/sql/catalyst/expressions/ComplexTypeSuite.scala    |  4 ++--
 .../catalyst/expressions/ConditionalExpressionSuite.scala    |  4 ++--
 .../sql/catalyst/expressions/ExpressionEvalHelper.scala      |  2 +-
 .../sql/catalyst/expressions/ObjectExpressionsSuite.scala    |  4 ++--
 .../sql/catalyst/expressions/StringExpressionsSuite.scala    |  2 +-
 .../sql/catalyst/expressions/aggregate/PercentileSuite.scala |  8 ++++----
 .../sql/catalyst/expressions/codegen/CodeBlockSuite.scala    |  2 +-
 .../spark/sql/catalyst/optimizer/SetOperationSuite.scala     |  8 ++++----
 .../apache/spark/sql/catalyst/parser/DDLParserSuite.scala    |  2 +-
 .../spark/sql/catalyst/parser/DataTypeParserSuite.scala      |  4 ++--
 .../apache/spark/sql/catalyst/parser/ErrorParserSuite.scala  |  2 +-
 .../spark/sql/catalyst/parser/ExpressionParserSuite.scala    |  4 ++--
 .../sql/catalyst/parser/TableIdentifierParserSuite.scala     |  2 +-
 .../apache/spark/sql/catalyst/util/UnsafeArraySuite.scala    |  8 ++++----
 .../java/org/apache/hive/service/cli/ColumnDescriptor.java   |  2 +-
 .../main/java/org/apache/hive/service/cli/GetInfoValue.java  |  2 +-
 .../hive/service/cli/operation/GetColumnsOperation.java      |  2 +-
 .../org/apache/hive/service/cli/session/HiveSessionImpl.java |  4 ++--
 .../apache/hive/service/cli/thrift/ThriftHttpCLIService.java |  2 +-
 .../apache/spark/sql/hive/thriftserver/DummyListeners.scala  |  2 +-
 .../spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala       |  2 +-
 .../spark/sql/hive/execution/HiveCompatibilitySuite.scala    | 12 ++++++------
 .../org/apache/spark/sql/hive/HiveMetastoreCatalog.scala     |  2 +-
 .../org/apache/spark/sql/hive/client/HiveClientImpl.scala    |  4 ++--
 .../sql/hive/execution/HiveScriptTransformationExec.scala    |  2 +-
 .../spark/sql/hive/execution/InsertIntoHiveTable.scala       |  2 +-
 .../spark/sql/hive/execution/PruneHiveTablePartitions.scala  |  2 +-
 .../src/test/queries/clientpositive/auto_sortmerge_join_13.q |  6 +++---
 .../queries/clientpositive/bucketsortoptimize_insert_3.q     |  4 ++--
 .../ql/src/test/queries/clientpositive/smb_mapjoin_20.q      |  2 +-
 .../test/scala/org/apache/spark/sql/hive/InsertSuite.scala   |  4 ++--
 .../scala/org/apache/spark/sql/hive/StatisticsSuite.scala    |  2 +-
 .../sql/hive/execution/HiveScriptTransformationSuite.scala   |  4 ++--
 .../org/apache/spark/sql/hive/execution/SQLQuerySuite.scala  |  8 ++++----
 75 files changed, 128 insertions(+), 128 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
index 88c672f1..d43c57e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
@@ -351,7 +351,7 @@ trait Row extends Serializable {
   /**
    * Returns the value at position i.
    * For primitive types if value is null it returns 'zero value' specific for primitive
-   * ie. 0 for Int - use isNullAt to ensure that value is not null
+   * i.e. 0 for Int - use isNullAt to ensure that value is not null
    *
    * @throws ClassCastException when data type does not match.
    */
@@ -360,7 +360,7 @@ trait Row extends Serializable {
   /**
    * Returns the value of a given fieldName.
    * For primitive types if value is null it returns 'zero value' specific for primitive
-   * ie. 0 for Int - use isNullAt to ensure that value is not null
+   * i.e. 0 for Int - use isNullAt to ensure that value is not null
    *
    * @throws UnsupportedOperationException when schema is not defined.
    * @throws IllegalArgumentException when fieldName do not exist.
@@ -381,7 +381,7 @@ trait Row extends Serializable {
   /**
    * Returns a Map consisting of names and values for the requested fieldNames
    * For primitive types if value is null it returns 'zero value' specific for primitive
-   * ie. 0 for Int - use isNullAt to ensure that value is not null
+   * i.e. 0 for Int - use isNullAt to ensure that value is not null
    *
    * @throws UnsupportedOperationException when schema is not defined.
    * @throws IllegalArgumentException when fieldName do not exist.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala
index fed1b32..ff67b6f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala
@@ -51,7 +51,7 @@ abstract class StructFilters(pushedFilters: Seq[sources.Filter], schema: StructT
 
   /**
    * Resets states of pushed down filters. The method must be called before
-   * precessing any new row otherwise `skipRow()` may return wrong result.
+   * processing any new row otherwise `skipRow()` may return wrong result.
    */
   def reset(): Unit
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 6769dc8..6541961 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -1495,7 +1495,7 @@ class Analyzer(override val catalogManager: CatalogManager)
         val rightRes = rightAttributes
           .map(x => resolveExpressionBottomUp(x, right).asInstanceOf[Attribute])
         f.copy(leftAttributes = leftRes, rightAttributes = rightRes)
-      // intersect/except will be rewritten to join at the begininng of optimizer. Here we need to
+      // intersect/except will be rewritten to join at the beginning of optimizer. Here we need to
       // deduplicate the right side plan, so that we won't produce an invalid self-join later.
       case i @ Intersect(left, right, _) if !i.duplicateResolved =>
         i.copy(right = dedupRight(left, right))
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
index 11c4883..9f5eefc 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
@@ -579,7 +579,7 @@ trait CheckAnalysis extends PredicateHelper {
 
           case showPartitions: ShowPartitions => checkShowPartitions(showPartitions)
 
-          case _ => // Fallbacks to the following checks
+          case _ => // Falls back to the following checks
         }
 
         operator match {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/StreamingJoinHelper.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/StreamingJoinHelper.scala
index cddc3a4..d8e200d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/StreamingJoinHelper.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/StreamingJoinHelper.scala
@@ -55,7 +55,7 @@ object StreamingJoinHelper extends PredicateHelper with Logging {
    * given the join condition and the event time watermark. This is how it works.
    * - The condition is split into conjunctive predicates, and we find the predicates of the
    *   form `leftTime + c1 < rightTime + c2`   (or <=, >, >=).
-   * - We canoncalize the predicate and solve it with the event time watermark value to find the
+   * - We canonicalize the predicate and solve it with the event time watermark value to find the
    *  value of the state watermark.
    * This function is supposed to make best-effort attempt to get the state watermark. If there is
    * any error, it will return None.
@@ -94,7 +94,7 @@ object StreamingJoinHelper extends PredicateHelper with Logging {
 
       // The generated the state watermark cleanup expression is inclusive of the state watermark.
       // If state watermark is W, all state where timestamp <= W will be cleaned up.
-      // Now when the canonicalized join condition solves to leftTime >= W, we dont want to clean
+      // Now when the canonicalized join condition solves to leftTime >= W, we don't want to clean
       // up leftTime <= W. Rather we should clean up leftTime <= W - 1. Hence the -1 below.
       val stateWatermark = predicate match {
         case LessThan(l, r) => getStateWatermarkSafely(l, r)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UpdateAttributeNullability.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UpdateAttributeNullability.scala
index 3eae34d..5004108 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UpdateAttributeNullability.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UpdateAttributeNullability.scala
@@ -25,7 +25,7 @@ import org.apache.spark.sql.catalyst.rules.Rule
  * Updates nullability of Attributes in a resolved LogicalPlan by using the nullability of
  * corresponding Attributes of its children output Attributes. This step is needed because
  * users can use a resolved AttributeReference in the Dataset API and outer joins
- * can change the nullability of an AttribtueReference. Without this rule, a nullable column's
+ * can change the nullability of an AttributeReference. Without this rule, a nullable column's
  * nullable field can be actually set as non-nullable, which cause illegal optimization
  * (e.g., NULL propagation) and wrong answers.
  * See SPARK-13484 and SPARK-13801 for the concrete queries of this case.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
index a2ab756..4c32870a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
@@ -1615,7 +1615,7 @@ class SessionCatalog(
   }
 
   /**
-   * Validate the new locatoin before renaming a managed table, which should be non-existent.
+   * Validate the new location before renaming a managed table, which should be non-existent.
    */
   private def validateNewLocationOfRename(
       oldName: TableIdentifier,
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala
index 56677d7..fd9e30d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala
@@ -143,7 +143,7 @@ class CSVInferSchema(val options: CSVOptions) extends Serializable {
       // The conversion can fail when the `field` is not a form of number.
       val bigDecimal = decimalParser(field)
       // Because many other formats do not support decimal, it reduces the cases for
-      // decimals by disallowing values having scale (eg. `1.1`).
+      // decimals by disallowing values having scale (e.g. `1.1`).
       if (bigDecimal.scale <= 0) {
         // `DecimalType` conversion can fail when
         //   1. The precision is bigger than 38.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
index 9ab3804..80a0374 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
@@ -189,7 +189,7 @@ object ExpressionEncoder {
   }
 
   /**
-   * Function that serializesa an object of type `T` to an [[InternalRow]]. This class is not
+   * Function that serializes an object of type `T` to an [[InternalRow]]. This class is not
    * thread-safe. Note that multiple calls to `apply(..)` return the same actual [[InternalRow]]
    * object.  Thus, the caller should copy the result before making another call if required.
    */
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/AliasHelper.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/AliasHelper.scala
index c61eb68..ad6cf95 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/AliasHelper.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/AliasHelper.scala
@@ -64,7 +64,7 @@ trait AliasHelper {
 
   /**
    * Replace all attributes, that reference an alias, with the aliased expression,
-   * but keep the name of the outmost attribute.
+   * but keep the name of the outermost attribute.
    */
   protected def replaceAliasButKeepName(
      expr: NamedExpression,
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala
index 0a69d5a..4a89d24 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUDF.scala
@@ -1145,7 +1145,7 @@ case class ScalaUDF(
     val resultConverter = s"$convertersTerm[${children.length}]"
     val boxedType = CodeGenerator.boxedType(dataType)
 
-    val funcInvokation = if (isPrimitive(dataType)
+    val funcInvocation = if (isPrimitive(dataType)
         // If the output is nullable, the returned value must be unwrapped from the Option
         && !nullable) {
       s"$resultTerm = ($boxedType)$getFuncResult"
@@ -1156,7 +1156,7 @@ case class ScalaUDF(
       s"""
          |$boxedType $resultTerm = null;
          |try {
-         |  $funcInvokation;
+         |  $funcInvocation;
          |} catch (Exception e) {
          |  throw new org.apache.spark.SparkException($errorMsgTerm, e);
          |}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Percentile.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Percentile.scala
index 0eba61c..b808083 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Percentile.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Percentile.scala
@@ -191,13 +191,13 @@ case class Percentile(
 
     val sortedCounts = buffer.toSeq.sortBy(_._1)(
       child.dataType.asInstanceOf[NumericType].ordering.asInstanceOf[Ordering[AnyRef]])
-    val accumlatedCounts = sortedCounts.scanLeft((sortedCounts.head._1, 0L)) {
+    val accumulatedCounts = sortedCounts.scanLeft((sortedCounts.head._1, 0L)) {
       case ((key1, count1), (key2, count2)) => (key2, count1 + count2)
     }.tail
-    val maxPosition = accumlatedCounts.last._2 - 1
+    val maxPosition = accumulatedCounts.last._2 - 1
 
     percentages.map { percentile =>
-      getPercentile(accumlatedCounts, maxPosition * percentile)
+      getPercentile(accumulatedCounts, maxPosition * percentile)
     }
   }
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
index c69edcc..3fbb798 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
@@ -216,7 +216,7 @@ abstract class BinaryArithmetic extends BinaryOperator with NullIntolerant {
     case DoubleType | FloatType =>
       // When Double/Float overflows, there can be 2 cases:
       // - precision loss: according to SQL standard, the number is truncated;
-      // - returns (+/-)Infinite: same behavior also other DBs have (eg. Postgres)
+      // - returns (+/-)Infinite: same behavior also other DBs have (e.g. Postgres)
       nullSafeCodeGen(ctx, ev, (eval1, eval2) => {
         s"""
            |${ev.value} = $eval1 $symbol $eval2;
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
index 1ff4a93..638878b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
@@ -175,7 +175,7 @@ class CodegenContext extends Logging {
     mutable.ArrayBuffer.empty[(String, String)]
 
   /**
-   * The mapping between mutable state types and corrseponding compacted arrays.
+   * The mapping between mutable state types and corresponding compacted arrays.
    * The keys are java type string. The values are [[MutableStateArrays]] which encapsulates
    * the compacted arrays for the mutable states with the same java type.
    *
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala
index e285398..4efcca0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateSafeProjection.scala
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, ArrayData, Generic
 import org.apache.spark.sql.types._
 
 /**
- * Java can not access Projection (in package object)
+ * Java cannot access Projection (in package object)
  */
 abstract class BaseProjection extends Projection {}
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/hash.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/hash.scala
index 6436082..ce177f5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/hash.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/hash.scala
@@ -907,7 +907,7 @@ object HiveHashFunction extends InterpretedHashFunction {
    * - year, month (stored as HiveIntervalYearMonth)
    * - day, hour, minute, second, nanosecond (stored as HiveIntervalDayTime)
    *
-   * eg. (INTERVAL '30' YEAR + INTERVAL '-23' DAY) fails in Hive
+   * e.g. (INTERVAL '30' YEAR + INTERVAL '-23' DAY) fails in Hive
    *
    * This method mimics HiveIntervalDayTime.hashCode() in Hive.
    *
@@ -919,7 +919,7 @@ object HiveHashFunction extends InterpretedHashFunction {
    *
    * - Spark's [[CalendarInterval]] has precision upto microseconds but Hive's
    *   HiveIntervalDayTime can store data with precision upto nanoseconds. So, any input intervals
-   *   with nanosecond values will lead to wrong output hashes (ie. non adherent with Hive output)
+   *   with nanosecond values will lead to wrong output hashes (i.e. non adherent with Hive output)
    */
   def hashCalendarInterval(calendarInterval: CalendarInterval): Long = {
     val totalMicroSeconds = calendarInterval.days * MICROS_PER_DAY + calendarInterval.microseconds
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala
index 4454afb..d1dabe7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/higherOrderFunctions.scala
@@ -128,7 +128,7 @@ trait HigherOrderFunction extends Expression with ExpectsInputTypes {
   def argumentTypes: Seq[AbstractDataType]
 
   /**
-   * All arguments have been resolved. This means that the types and nullabilty of (most of) the
+   * All arguments have been resolved. This means that the types and nullability of (most of) the
    * lambda function arguments is known, and that we can start binding the lambda functions.
    */
   lazy val argumentsResolved: Boolean = arguments.forall(_.resolved)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala
index a363615..c22b688 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala
@@ -808,10 +808,10 @@ case class SchemaOfJson(
 }
 
 /**
- * A function that returns the number of elements in the outmost JSON array.
+ * A function that returns the number of elements in the outermost JSON array.
  */
 @ExpressionDescription(
-  usage = "_FUNC_(jsonArray) - Returns the number of elements in the outmost JSON array.",
+  usage = "_FUNC_(jsonArray) - Returns the number of elements in the outermost JSON array.",
   arguments = """
     Arguments:
       * jsonArray - A JSON array. `NULL` is returned in case of any other valid JSON string,
@@ -877,13 +877,13 @@ case class LengthOfJsonArray(child: Expression) extends UnaryExpression
 }
 
 /**
- * A function which returns all the keys of the outmost JSON object.
+ * A function which returns all the keys of the outermost JSON object.
  */
 @ExpressionDescription(
-  usage = "_FUNC_(json_object) - Returns all the keys of the outmost JSON object as an array.",
+  usage = "_FUNC_(json_object) - Returns all the keys of the outermost JSON object as an array.",
   arguments = """
     Arguments:
-      * json_object - A JSON object. If a valid JSON object is given, all the keys of the outmost
+      * json_object - A JSON object. If a valid JSON object is given, all the keys of the outermost
           object will be returned as an array. If it is any other valid JSON string, an invalid JSON
           string or an empty string, the function returns null.
   """,
@@ -921,7 +921,7 @@ case class JsonObjectKeys(child: Expression) extends UnaryExpression with Codege
           if (parser.nextToken() == null || parser.currentToken() != JsonToken.START_OBJECT) {
             return null
           }
-          // Parse the JSON string to get all the keys of outmost JSON object
+          // Parse the JSON string to get all the keys of outermost JSON object
           getJsonKeys(parser, input)
         }
       }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
index 0b94fe8..28c9aef 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
@@ -93,7 +93,7 @@ abstract class StringRegexExpression extends BinaryExpression
           Since Spark 2.0, string literals are unescaped in our SQL parser. For example, in order
           to match "\abc", the pattern should be "\\abc".
 
-          When SQL config 'spark.sql.parser.escapedStringLiterals' is enabled, it fallbacks
+          When SQL config 'spark.sql.parser.escapedStringLiterals' is enabled, it falls back
           to Spark 1.6 behavior regarding string literal parsing. For example, if the config is
           enabled, the pattern to match "\abc" should be "\abc".
       * escape - an character added since Spark 3.0. The default escape character is the '\'.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
index b6dd817..43ecbd6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
@@ -173,7 +173,7 @@ sealed trait WindowFrame extends Expression with Unevaluable {
 case object UnspecifiedFrame extends WindowFrame
 
 /**
- * A specified Window Frame. The val lower/uppper can be either a foldable [[Expression]] or a
+ * A specified Window Frame. The val lower/upper can be either a foldable [[Expression]] or a
  * [[SpecialFrameBoundary]].
  */
 case class SpecifiedWindowFrame(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/NestedColumnAliasing.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/NestedColumnAliasing.scala
index b053bf6d..0be2792 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/NestedColumnAliasing.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/NestedColumnAliasing.scala
@@ -227,7 +227,7 @@ object NestedColumnAliasing {
 }
 
 /**
- * This prunes unnessary nested columns from `Generate` and optional `Project` on top
+ * This prunes unnecessary nested columns from `Generate` and optional `Project` on top
  * of it.
  */
 object GeneratorNestedColumnAliasing {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
index b7c8f77..aa8540f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
@@ -853,7 +853,7 @@ object CollapseWindow extends Rule[LogicalPlan] {
  *   of the child window expression, transpose them.
  */
 object TransposeWindow extends Rule[LogicalPlan] {
-  private def compatibleParititions(ps1 : Seq[Expression], ps2: Seq[Expression]): Boolean = {
+  private def compatiblePartitions(ps1 : Seq[Expression], ps2: Seq[Expression]): Boolean = {
     ps1.length < ps2.length && ps2.take(ps1.length).permutations.exists(ps1.zip(_).forall {
       case (l, r) => l.semanticEquals(r)
     })
@@ -864,7 +864,7 @@ object TransposeWindow extends Rule[LogicalPlan] {
         if w1.references.intersect(w2.windowOutputSet).isEmpty &&
            w1.expressions.forall(_.deterministic) &&
            w2.expressions.forall(_.deterministic) &&
-           compatibleParititions(ps1, ps2) =>
+           compatiblePartitions(ps1, ps2) =>
       Project(w1.output, Window(we2, ps2, os2, Window(we1, ps1, os1, grandChild)))
   }
 }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PushDownLeftSemiAntiJoin.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PushDownLeftSemiAntiJoin.scala
index 50fe019..286b447 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PushDownLeftSemiAntiJoin.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PushDownLeftSemiAntiJoin.scala
@@ -172,7 +172,7 @@ object PushDownLeftSemiAntiJoin extends Rule[LogicalPlan] with PredicateHelper {
  * TODO:
  * Currently this rule can push down the left semi or left anti joins to either
  * left or right leg of the child join. This matches the behaviour of `PushPredicateThroughJoin`
- * when the lefi semi or left anti join is in expression form. We need to explore the possibility
+ * when the left semi or left anti join is in expression form. We need to explore the possibility
  * to push the left semi/anti joins to both legs of join if the join condition refers to
  * both left and right legs of the child join.
  */
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala
index 1b1e2ad..4cdaf10 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala
@@ -70,7 +70,7 @@ object ConstantFolding extends Rule[LogicalPlan] {
 /**
  * Substitutes [[Attribute Attributes]] which can be statically evaluated with their corresponding
  * value in conjunctive [[Expression Expressions]]
- * eg.
+ * e.g.
  * {{{
  *   SELECT * FROM table WHERE i = 5 AND j = i + 3
  *   ==>  SELECT * FROM table WHERE i = 5 AND j = 8
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/subquery.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/subquery.scala
index 3c2ee31..9d023b7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/subquery.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/subquery.scala
@@ -63,7 +63,7 @@ object RewritePredicateSubquery extends Rule[LogicalPlan] with PredicateHelper {
     // the produced join then becomes unresolved and break structural integrity. We should
     // de-duplicate conflicting attributes.
     // SPARK-26078: it may also happen that the subquery has conflicting attributes with the outer
-    // values. In this case, the resulting join would contain trivially true conditions (eg.
+    // values. In this case, the resulting join would contain trivially true conditions (e.g.
     // id#3 = id#3) which cannot be de-duplicated after. In this method, if there are conflicting
     // attributes in the join condition, the subquery's conflicting attributes are changed using
     // a projection which aliases them and resolves the problem.
@@ -174,7 +174,7 @@ object RewritePredicateSubquery extends Rule[LogicalPlan] with PredicateHelper {
           val inConditions = values.zip(sub.output).map(EqualTo.tupled)
           // To handle a null-aware predicate not-in-subquery in nested conditions
           // (e.g., `v > 0 OR t1.id NOT IN (SELECT id FROM t2)`), we transform
-          // `inConditon` (t1.id=t2.id) into `(inCondition) OR ISNULL(inCondition)`.
+          // `inCondition` (t1.id=t2.id) into `(inCondition) OR ISNULL(inCondition)`.
           //
           // For example, `SELECT * FROM t1 WHERE v > 0 OR t1.id NOT IN (SELECT id FROM t2)`
           // is transformed into a plan below;
@@ -567,7 +567,7 @@ object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] with AliasHelpe
                 subqueryRoot = Project(projList ++ havingInputs, subqueryRoot)
               case s @ SubqueryAlias(alias, _) =>
                 subqueryRoot = SubqueryAlias(alias, subqueryRoot)
-              case op => sys.error(s"Unexpected operator $op in corelated subquery")
+              case op => sys.error(s"Unexpected operator $op in correlated subquery")
             }
 
             // CASE WHEN alwaysTrue IS NULL THEN resultOnZeroTups
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
index 1f32620..948b94a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
@@ -127,7 +127,7 @@ object ParserUtils {
     }
   }
 
-  /** Unescape baskslash-escaped string enclosed by quotes. */
+  /** Unescape backslash-escaped string enclosed by quotes. */
   def unescapeSQLString(b: String): String = {
     var enclosure: Character = null
     val sb = new StringBuilder(b.length())
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
index 864ca4f..e0839a3 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
@@ -397,7 +397,7 @@ abstract class QueryPlan[PlanType <: QueryPlan[PlanType]]
 
       case ar: AttributeReference if allAttributes.indexOf(ar.exprId) == -1 =>
         // Top level `AttributeReference` may also be used for output like `Alias`, we should
-        // normalize the epxrId too.
+        // normalize the exprId too.
         id += 1
         ar.withExprId(ExprId(id)).canonicalized
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
index ad5c3fd..1a9c9d1 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
@@ -136,7 +136,7 @@ abstract class LogicalPlan
   def outputOrdering: Seq[SortOrder] = Nil
 
   /**
-   * Returns true iff `other`'s output is semantically the same, ie.:
+   * Returns true iff `other`'s output is semantically the same, i.e.:
    *  - it contains the same number of `Attribute`s;
    *  - references are the same;
    *  - the order is equal too.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/PlanHelper.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/PlanHelper.scala
index 63348f7..5ec488e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/PlanHelper.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/PlanHelper.scala
@@ -29,7 +29,7 @@ object PlanHelper {
   /**
    * Check if there's any expression in this query plan operator that is
    * - A WindowExpression but the plan is not Window
-   * - An AggregateExpresion but the plan is not Aggregate or Window
+   * - An AggregateExpression but the plan is not Aggregate or Window
    * - A Generator but the plan is not Generate
    * Returns the list of invalid expressions that this operator hosts. This can happen when
    * 1. The input query from users contain invalid expressions.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
index aa7151a..0e4bfa4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
@@ -675,7 +675,7 @@ object Expand {
     val numAttributes = attrMap.size
     assert(numAttributes <= GroupingID.dataType.defaultSize * 8)
     val mask = if (numAttributes != 64) (1L << numAttributes) - 1 else 0xFFFFFFFFFFFFFFFFL
-    // Calculate the attrbute masks of selected grouping set. For example, if we have GroupBy
+    // Calculate the attribute masks of selected grouping set. For example, if we have GroupBy
     // attributes (a, b, c, d), grouping set (a, c) will produce the following sequence:
     // (15, 7, 13), whose binary form is (1111, 0111, 1101)
     val masks = (mask +: groupingSetAttrs.map(attrMap).map(index =>
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
index 17e1cb4..c4002aa 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
@@ -164,7 +164,7 @@ trait Partitioning {
    * i.e. the current dataset does not need to be re-partitioned for the `required`
    * Distribution (it is possible that tuples within a partition need to be reorganized).
    *
-   * A [[Partitioning]] can never satisfy a [[Distribution]] if its `numPartitions` does't match
+   * A [[Partitioning]] can never satisfy a [[Distribution]] if its `numPartitions` doesn't match
    * [[Distribution.requiredNumPartitions]].
    */
   final def satisfies(required: Distribution): Boolean = {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeFormatterHelper.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeFormatterHelper.scala
index f02b2d08..eac34c8 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeFormatterHelper.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeFormatterHelper.scala
@@ -226,8 +226,8 @@ private object DateTimeFormatterHelper {
       // string at res(0). So when the first element here is empty string we do not need append `'`
       // literal to the DateTimeFormatterBuilder.
       case ("", idx) if idx != 0 => builder.appendLiteral("'")
-      case (pattenPart, idx) if idx % 2 == 0 =>
-        var rest = pattenPart
+      case (patternPart, idx) if idx % 2 == 0 =>
+        var rest = patternPart
         while (rest.nonEmpty) {
           rest match {
             case extractor(prefix, secondFraction, suffix) =>
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeUtils.scala
index 87cf3c9..0543ef9 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeUtils.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeUtils.scala
@@ -189,7 +189,7 @@ object DateTimeUtils {
    * precision, so this conversion is lossy.
    */
   def microsToMillis(micros: Long): Long = {
-    // When the timestamp is negative i.e before 1970, we need to adjust the millseconds portion.
+    // When the timestamp is negative i.e before 1970, we need to adjust the milliseconds portion.
     // Example - 1965-01-01 10:11:12.123456 is represented as (-157700927876544) in micro precision.
     // In millis precision the above needs to be represented as (-157700927877).
     Math.floorDiv(micros, MICROS_PER_MILLIS)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala
index ae7066d..addf140 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala
@@ -173,13 +173,13 @@ class QuantileSummaries(
       // Take the case of the sample `10` from `b`. In the original stream, it could have appeared
       // right after `0` (as expressed by `g=1`) or right before `20`, so `delta=99+0-1=98`.
       // In the GK algorithm's style of working in terms of maximum bounds, one can observe that the
-      // maximum additional uncertainty over samples comming from `b` is `max(g_a + delta_a) =
+      // maximum additional uncertainty over samples coming from `b` is `max(g_a + delta_a) =
       // floor(2 * eps_a * n_a)`. Likewise, additional uncertainty over samples from `a` is
       // `floor(2 * eps_b * n_b)`.
       // Only samples that interleave the other side are affected. That means that samples from
       // one side that are lesser (or greater) than all samples from the other side are just copied
-      // unmodifed.
-      // If the merging instances have different `relativeError`, the resulting instance will cary
+      // unmodified.
+      // If the merging instances have different `relativeError`, the resulting instance will carry
       // the largest one: `eps_ab = max(eps_a, eps_b)`.
       // The main invariant of the GK algorithm is kept:
       // `max(g_ab + delta_ab) <= floor(2 * eps_ab * (n_a + n_b))` since
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 69f04e1..e8e1120 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -1145,7 +1145,7 @@ object SQLConf {
 
   val CODEGEN_FACTORY_MODE = buildConf("spark.sql.codegen.factoryMode")
     .doc("This config determines the fallback behavior of several codegen generators " +
-      "during tests. `FALLBACK` means trying codegen first and then fallbacking to " +
+      "during tests. `FALLBACK` means trying codegen first and then falling back to " +
       "interpreted if any compile error happens. Disabling fallback if `CODEGEN_ONLY`. " +
       "`NO_CODEGEN` skips codegen and goes interpreted path always. Note that " +
       "this config works only for tests.")
@@ -1570,7 +1570,7 @@ object SQLConf {
   val JSON_EXPRESSION_OPTIMIZATION =
     buildConf("spark.sql.optimizer.enableJsonExpressionOptimization")
       .doc("Whether to optimize JSON expressions in SQL optimizer. It includes pruning " +
-        "unnecessary columns from from_json, simplifing from_json + to_json, to_json + " +
+        "unnecessary columns from from_json, simplifying from_json + to_json, to_json + " +
         "named_struct(from_json.col1, from_json.col2, ....).")
       .version("3.1.0")
       .booleanConf
@@ -2058,7 +2058,7 @@ object SQLConf {
     buildConf("spark.sql.decimalOperations.allowPrecisionLoss")
       .internal()
       .doc("When true (default), establishing the result type of an arithmetic operation " +
-        "happens according to Hive behavior and SQL ANSI 2011 specification, ie. rounding the " +
+        "happens according to Hive behavior and SQL ANSI 2011 specification, i.e. rounding the " +
         "decimal part of the result if an exact representation is not possible. Otherwise, NULL " +
         "is returned in those cases, as previously.")
       .version("2.3.1")
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
index 9fa27c7..4badcba 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
@@ -204,7 +204,7 @@ object RandomDataGenerator {
             specialDates.map(java.sql.Date.valueOf))
         }
       case TimestampType =>
-        def uniformMicorsRand(rand: Random): Long = {
+        def uniformMicrosRand(rand: Random): Long = {
           var milliseconds = rand.nextLong() % 253402329599999L
           // -62135740800000L is the number of milliseconds before January 1, 1970, 00:00:00 GMT
           // for "0001-01-01 00:00:00.000000". We need to find a
@@ -225,7 +225,7 @@ object RandomDataGenerator {
         if (SQLConf.get.getConf(SQLConf.DATETIME_JAVA8API_ENABLED)) {
           randomNumeric[Instant](
             rand,
-            (rand: Random) => DateTimeUtils.microsToInstant(uniformMicorsRand(rand)),
+            (rand: Random) => DateTimeUtils.microsToInstant(uniformMicrosRand(rand)),
             specialTs.map { s =>
               val ldt = LocalDateTime.parse(s.replace(" ", "T"))
               ldt.atZone(ZoneId.systemDefault()).toInstant
@@ -235,7 +235,7 @@ object RandomDataGenerator {
             rand,
             (rand: Random) => {
               // DateTimeUtils.toJavaTimestamp takes microsecond.
-              val ts = DateTimeUtils.toJavaTimestamp(uniformMicorsRand(rand))
+              val ts = DateTimeUtils.toJavaTimestamp(uniformMicrosRand(rand))
               // The generated `ts` is based on the hybrid calendar Julian + Gregorian since
               // 1582-10-15 but it should be valid in Proleptic Gregorian calendar too which is used
               // by Spark SQL since version 3.0 (see SPARK-26651). We try to convert `ts` to
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
index f5bfdc5..61186c1 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
@@ -427,7 +427,7 @@ class AnalysisSuite extends AnalysisTest with Matchers {
     checkAnalysis(plan, expected)
   }
 
-  test("SPARK-12102: Ignore nullablity when comparing two sides of case") {
+  test("SPARK-12102: Ignore nullability when comparing two sides of case") {
     val relation = LocalRelation(Symbol("a").struct(Symbol("x").int),
       Symbol("b").struct(Symbol("x").int.withNullability(false)))
     val plan = relation.select(
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveGroupingAnalyticsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveGroupingAnalyticsSuite.scala
index 249e7a4..cdfae14 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveGroupingAnalyticsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveGroupingAnalyticsSuite.scala
@@ -160,7 +160,7 @@ class ResolveGroupingAnalyticsSuite extends AnalysisTest {
   }
 
   test("grouping function") {
-    // GrouingSets
+    // GroupingSets
     val originalPlan = GroupingSets(Seq(Seq(), Seq(unresolved_a), Seq(unresolved_a, unresolved_b)),
       Seq(unresolved_a, unresolved_b), r1,
       Seq(unresolved_a, unresolved_b, UnresolvedAlias(count(unresolved_c)),
@@ -200,7 +200,7 @@ class ResolveGroupingAnalyticsSuite extends AnalysisTest {
   }
 
   test("grouping_id") {
-    // GrouingSets
+    // GroupingSets
     val originalPlan = GroupingSets(Seq(Seq(), Seq(unresolved_a), Seq(unresolved_a, unresolved_b)),
       Seq(unresolved_a, unresolved_b), r1,
       Seq(unresolved_a, unresolved_b, UnresolvedAlias(count(unresolved_c)),
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala
index 1e5bc27..5c4d45b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala
@@ -260,7 +260,7 @@ class TypeCoercionSuite extends AnalysisTest {
 
     // Tests that its not possible to setup implicit casts between two map types when
     // source map's key type is integer and the target map's key type are either Binary,
-    // Boolean, Date, Timestamp, Array, Struct, CaleandarIntervalType or NullType
+    // Boolean, Date, Timestamp, Array, Struct, CalendarIntervalType or NullType
     nonCastableTargetTypes.foreach { targetType =>
       shouldNotCast(sourceType, targetType)
     }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationsSuite.scala
index cdc3f42..fa77947 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationsSuite.scala
@@ -887,7 +887,7 @@ class UnsupportedOperationsSuite extends SparkFunSuite with SQLHelper {
     }
   }
 
-  /** Assert that the logical plan is supported for continuous procsssing mode */
+  /** Assert that the logical plan is supported for continuous processing mode */
   def assertSupportedForContinuousProcessing(
     name: String,
     plan: LogicalPlan,
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala
index adaabfe..bca8c56 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala
@@ -527,7 +527,7 @@ class CodeGenerationSuite extends SparkFunSuite with ExpressionEvalHelper {
       .exists(_.getRenderedMessage().contains("Generated method too long")))
   }
 
-  test("SPARK-28916: subexrepssion elimination can cause 64kb code limit on UnsafeProjection") {
+  test("SPARK-28916: subexpression elimination can cause 64kb code limit on UnsafeProjection") {
     val numOfExprs = 10000
     val exprs = (0 to numOfExprs).flatMap(colIndex =>
       Seq(Add(BoundReference(colIndex, DoubleType, true),
@@ -554,7 +554,7 @@ class CodeGenerationSuite extends SparkFunSuite with ExpressionEvalHelper {
 
     // Expecting result:
     //   "((scala.math.LowPriorityOrderingImplicits$$anon$3) references[0] /* comparator */)"
-    // Using lenient assertions to be resilient to annonymous class numbering changes
+    // Using lenient assertions to be resilient to anonymous class numbering changes
     assert(!refTerm.contains("null"))
     assert(refTerm.contains("scala.math.LowPriorityOrderingImplicits$$anon$"))
   }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
index 3d6f693..57abdb4 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
@@ -425,14 +425,14 @@ class ComplexTypeSuite extends SparkFunSuite with ExpressionEvalHelper {
     def checkErrorMessage(
       childDataType: DataType,
       fieldDataType: DataType,
-      errorMesage: String): Unit = {
+      errorMessage: String): Unit = {
       val e = intercept[org.apache.spark.sql.AnalysisException] {
         ExtractValue(
           Literal.create(null, childDataType),
           Literal.create(null, fieldDataType),
           _ == _)
       }
-      assert(e.getMessage().contains(errorMesage))
+      assert(e.getMessage().contains(errorMessage))
     }
 
     checkErrorMessage(structType, IntegerType, "Field name should be String Literal")
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ConditionalExpressionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ConditionalExpressionSuite.scala
index 87e34aca..ee6f89a 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ConditionalExpressionSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ConditionalExpressionSuite.scala
@@ -212,8 +212,8 @@ class ConditionalExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
 
   test("case key when - internal pattern matching expects a List while apply takes a Seq") {
     val indexedSeq = IndexedSeq(Literal(1), Literal(42), Literal(42), Literal(1))
-    val caseKeyWhaen = CaseKeyWhen(Literal(12), indexedSeq)
-    assert(caseKeyWhaen.branches ==
+    val caseKeyWhen = CaseKeyWhen(Literal(12), indexedSeq)
+    assert(caseKeyWhen.branches ==
       IndexedSeq((Literal(12) === Literal(1), Literal(42)),
         (Literal(12) === Literal(42), Literal(1))))
   }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
index 70eb391..26d9815 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
@@ -451,7 +451,7 @@ trait ExpressionEvalHelper extends ScalaCheckDrivenPropertyChecks with PlanTestB
     if (interpret.isDefined && codegen.isDefined && !compareResults(interpret.get, codegen.get)) {
       fail(s"Incorrect evaluation: $expr, interpret: ${interpret.get}, codegen: ${codegen.get}")
     } else if (interpretExc.isDefined && codegenExc.isEmpty) {
-      fail(s"Incorrect evaluation: $expr, interpet threw exception ${interpretExc.get}")
+      fail(s"Incorrect evaluation: $expr, interpret threw exception ${interpretExc.get}")
     } else if (interpretExc.isEmpty && codegenExc.isDefined) {
       fail(s"Incorrect evaluation: $expr, codegen threw exception ${codegenExc.get}")
     } else if (interpretExc.isDefined && codegenExc.isDefined
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala
index bc2b93e..d425d0b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala
@@ -212,9 +212,9 @@ class ObjectExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
 
     val initializeWithNonexistingMethod = InitializeJavaBean(
       Literal.fromObject(new java.util.LinkedList[Int]),
-      Map("nonexisting" -> Literal(1)))
+      Map("nonexistent" -> Literal(1)))
     checkExceptionInExpression[Exception](initializeWithNonexistingMethod,
-      """A method named "nonexisting" is not declared in any enclosing class """ +
+      """A method named "nonexistent" is not declared in any enclosing class """ +
         "nor any supertype")
 
     val initializeWithWrongParamType = InitializeJavaBean(
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
index 730574a..78e9cf8 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
@@ -118,7 +118,7 @@ class StringExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
     testElt(null, 1, null, "world")
     testElt(null, null, "hello", "world")
 
-    // Invalid ranages
+    // Invalid ranges
     testElt(null, 3, "hello", "world")
     testElt(null, 0, "hello", "world")
     testElt(null, -1, "hello", "world")
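
The null expectations above come from elt's out-of-range handling. A quick sketch of the user-facing behavior, assuming ANSI mode is off (the default) and a throwaway local session:

    import org.apache.spark.sql.SparkSession

    object EltRangeSketch extends App {
      val spark = SparkSession.builder().master("local[1]").appName("elt-range-sketch").getOrCreate()
      spark.sql("SELECT elt(1, 'hello', 'world')").show()  // picks the 1st string: hello
      spark.sql("SELECT elt(0, 'hello', 'world')").show()  // index below range: NULL
      spark.sql("SELECT elt(3, 'hello', 'world')").show()  // index above range: NULL
      spark.stop()
    }
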
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/PercentileSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/PercentileSuite.scala
index 972db7f..d6e6142 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/PercentileSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/PercentileSuite.scala
@@ -66,20 +66,20 @@ class PercentileSuite extends SparkFunSuite {
     // Test with row with frequency. Second and third columns are frequency in Int and Long
     val countForFrequencyTest = 1000
     val rowsWithFrequency = (1 to countForFrequencyTest).map(x => Seq(x, x):+ x.toLong)
-    val expectedPercentilesWithFrquency = Seq(1.0, 500.0, 707.0, 866.0, 1000.0)
+    val expectedPercentilesWithFrequency = Seq(1.0, 500.0, 707.0, 866.0, 1000.0)
 
     val frequencyExpressionInt = BoundReference(1, IntegerType, nullable = false)
     val aggInt = new Percentile(childExpression, percentageExpression, frequencyExpressionInt)
-    runTest(aggInt, rowsWithFrequency, expectedPercentilesWithFrquency)
+    runTest(aggInt, rowsWithFrequency, expectedPercentilesWithFrequency)
 
     val frequencyExpressionLong = BoundReference(2, LongType, nullable = false)
     val aggLong = new Percentile(childExpression, percentageExpression, frequencyExpressionLong)
-    runTest(aggLong, rowsWithFrequency, expectedPercentilesWithFrquency)
+    runTest(aggLong, rowsWithFrequency, expectedPercentilesWithFrequency)
 
     // Run test with Flatten data
     val flattenRows = (1 to countForFrequencyTest).flatMap(current =>
       (1 to current).map(y => current )).map(Seq(_))
-    runTest(agg, flattenRows, expectedPercentilesWithFrquency)
+    runTest(agg, flattenRows, expectedPercentilesWithFrequency)
   }
 
   private def runTest(agg: Percentile,
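
The renamed expectation exercises percentile's optional frequency argument. A small sketch of the SQL form it corresponds to, with made-up data:

    import org.apache.spark.sql.SparkSession

    object PercentileFrequencySketch extends App {
      val spark = SparkSession.builder().master("local[1]").appName("percentile-frequency-sketch").getOrCreate()
      import spark.implicits._
      // value 10 occurs once, 20 twice, 30 three times.
      Seq((10, 1), (20, 2), (30, 3)).toDF("value", "freq").createOrReplaceTempView("t")
      // The third argument weights each value by its frequency column, as if the rows had been
      // expanded to (10, 20, 20, 30, 30, 30); the 0th and 100th percentiles are 10.0 and 30.0.
      spark.sql("SELECT percentile(value, array(0.0, 0.5, 1.0), freq) FROM t").show(false)
      spark.stop()
    }
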
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeBlockSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeBlockSuite.scala
index d660afb..9d4c598 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeBlockSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeBlockSuite.scala
@@ -115,7 +115,7 @@ class CodeBlockSuite extends SparkFunSuite {
     assert(exprValues === Set(isNull1, value1, isNull2, value2, literal))
   }
 
-  test("Throws exception when interpolating unexcepted object in code block") {
+  test("Throws exception when interpolating unexpected object in code block") {
     val obj = Tuple2(1, 1)
     val e = intercept[IllegalArgumentException] {
       code"$obj"
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala
index 2eea840..8543b62 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala
@@ -154,11 +154,11 @@ class SetOperationSuite extends PlanTest {
       .union(testRelation2.select(Literal(-1L).as("vcol"), 'd, 'e, 'f))
       .groupBy('a, 'b, 'c)('a, 'b, 'c, sum('vcol).as("sum"))
       .where(GreaterThan('sum, Literal(0L))).analyze
-    val multiplerAttr = planFragment.output.last
+    val multiplierAttr = planFragment.output.last
     val output = planFragment.output.dropRight(1)
     val expectedPlan = Project(output,
       Generate(
-        ReplicateRows(Seq(multiplerAttr) ++ output),
+        ReplicateRows(Seq(multiplierAttr) ++ output),
         Nil,
         false,
         None,
@@ -183,11 +183,11 @@ class SetOperationSuite extends PlanTest {
       .select('a, 'b, 'c,
         If(GreaterThan('vcol1_count, 'vcol2_count), 'vcol2_count, 'vcol1_count).as("min_count"))
       .analyze
-    val multiplerAttr = planFragment.output.last
+    val multiplierAttr = planFragment.output.last
     val output = planFragment.output.dropRight(1)
     val expectedPlan = Project(output,
       Generate(
-        ReplicateRows(Seq(multiplerAttr) ++ output),
+        ReplicateRows(Seq(multiplierAttr) ++ output),
         Nil,
         false,
         None,
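
The multiplierAttr renamed above is the replication count produced when EXCEPT ALL and INTERSECT ALL are rewritten into a ReplicateRows generator. For orientation, a minimal sketch of those operators at the SQL level, with made-up single-column data:

    import org.apache.spark.sql.SparkSession

    object SetOpAllSketch extends App {
      val spark = SparkSession.builder().master("local[1]").appName("setop-all-sketch").getOrCreate()
      import spark.implicits._
      Seq(1, 1, 2, 3).toDF("a").createOrReplaceTempView("t1")
      Seq(1, 2, 2).toDF("a").createOrReplaceTempView("t2")
      // EXCEPT ALL keeps duplicates: {1, 1, 2, 3} minus {1, 2, 2} leaves one 1 and one 3.
      spark.sql("SELECT a FROM t1 EXCEPT ALL SELECT a FROM t2").show()
      // INTERSECT ALL keeps the minimum multiplicity of each value: one 1 and one 2.
      spark.sql("SELECT a FROM t1 INTERSECT ALL SELECT a FROM t2").show()
      spark.stop()
    }
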
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
index 0f1b4a3..e98ec6a 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
@@ -958,7 +958,7 @@ class DDLParserSuite extends AnalysisTest {
         Some(first())))
   }
 
-  test("alter table: mutiple property changes are not allowed") {
+  test("alter table: multiple property changes are not allowed") {
     intercept[ParseException] {
       parsePlan("ALTER TABLE table_name ALTER COLUMN a.b.c " +
         "TYPE bigint COMMENT 'new comment'")}
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DataTypeParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DataTypeParserSuite.scala
index b9f9840..46ad5d1 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DataTypeParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DataTypeParserSuite.scala
@@ -124,8 +124,8 @@ class DataTypeParserSuite extends SparkFunSuite {
   unsupported("struct<x int, y string>")
 
   test("Do not print empty parentheses for no params") {
-    assert(intercept("unkwon").getMessage.contains("unkwon is not supported"))
-    assert(intercept("unkwon(1,2,3)").getMessage.contains("unkwon(1,2,3) is not supported"))
+    assert(intercept("unknown").getMessage.contains("unknown is not supported"))
+    assert(intercept("unknown(1,2,3)").getMessage.contains("unknown(1,2,3) is not supported"))
   }
 
   // DataType parser accepts certain reserved keywords.
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ErrorParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ErrorParserSuite.scala
index 00b6828..99051d6 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ErrorParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ErrorParserSuite.scala
@@ -77,7 +77,7 @@ class ErrorParserSuite extends AnalysisTest {
   }
 
   test("SPARK-21136: misleading error message due to problematic antlr grammar") {
-    intercept("select * from a left joinn b on a.id = b.id", "missing 'JOIN' at 'joinn'")
+    intercept("select * from a left join_ b on a.id = b.id", "missing 'JOIN' at 'join_'")
     intercept("select * from test where test.t is like 'test'", "mismatched input 'is' expecting")
     intercept("SELECT * FROM test WHERE x NOT NULL", "mismatched input 'NOT' expecting")
   }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala
index 9f6a76b..0b304a7 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala
@@ -590,7 +590,7 @@ class ExpressionParserSuite extends AnalysisTest {
 
         // tests that have different result regarding the conf
         if (escape) {
-          // When SQLConf.ESCAPED_STRING_LITERALS is enabled, string literal parsing fallbacks to
+          // When SQLConf.ESCAPED_STRING_LITERALS is enabled, string literal parsing falls back to
           // Spark 1.6 behavior.
 
           // 'LIKE' string literals.
@@ -780,7 +780,7 @@ class ExpressionParserSuite extends AnalysisTest {
     val complexName = FunctionIdentifier("`ba`r", Some("`fo`o"))
     assertEqual(complexName.quotedString, UnresolvedAttribute("`fo`o.`ba`r"))
     intercept(complexName.unquotedString, "mismatched input")
-    // Function identifier contains countious backticks should be treated correctly.
+    // Function identifier contains continuous backticks should be treated correctly.
     val complexName2 = FunctionIdentifier("ba``r", Some("fo``o"))
     assertEqual(complexName2.quotedString, UnresolvedAttribute("fo``o.ba``r"))
   }
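
For the ESCAPED_STRING_LITERALS comment fixed above, here is a brief sketch of the fallback it refers to, assuming the config can be toggled on a throwaway local session:

    import org.apache.spark.sql.SparkSession

    object EscapedStringLiteralsSketch extends App {
      val spark = SparkSession.builder().master("local[1]").appName("escaped-literals-sketch").getOrCreate()
      // Default: backslash escapes are processed, so '\t' is a single tab character.
      spark.conf.set("spark.sql.parser.escapedStringLiterals", "false")
      spark.sql("SELECT length('\\t') AS len").show()  // 1
      // Spark 1.6 fallback: the literal keeps the backslash, so '\t' is two characters.
      spark.conf.set("spark.sql.parser.escapedStringLiterals", "true")
      spark.sql("SELECT length('\\t') AS len").show()  // 2
      spark.stop()
    }
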
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/TableIdentifierParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/TableIdentifierParserSuite.scala
index f037ce7..bad3e0d 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/TableIdentifierParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/TableIdentifierParserSuite.scala
@@ -355,7 +355,7 @@ class TableIdentifierParserSuite extends SparkFunSuite with SQLKeywordUtils {
     assert(complexName === parseTableIdentifier("```d``b``1`.```weird``table``name`"))
     assert(complexName === parseTableIdentifier(complexName.quotedString))
     intercept[ParseException](parseTableIdentifier(complexName.unquotedString))
-    // Table identifier contains countious backticks should be treated correctly.
+    // Table identifier contains continuous backticks should be treated correctly.
     val complexName2 = TableIdentifier("x``y", Some("d``b"))
     assert(complexName2 === parseTableIdentifier(complexName2.quotedString))
   }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/UnsafeArraySuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/UnsafeArraySuite.scala
index 2e190c6..5729b02 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/UnsafeArraySuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/UnsafeArraySuite.scala
@@ -55,7 +55,7 @@ class UnsafeArraySuite extends SparkFunSuite {
     BigDecimal("1.2345678901234567890123456").setScale(21, BigDecimal.RoundingMode.FLOOR),
     BigDecimal("2.3456789012345678901234567").setScale(21, BigDecimal.RoundingMode.FLOOR))
 
-  val calenderintervalArray = Array(
+  val calendarintervalArray = Array(
     new CalendarInterval(3, 2, 321), new CalendarInterval(1, 2, 123))
 
   val intMultiDimArray = Array(Array(1), Array(2, 20), Array(3, 30, 300))
@@ -142,12 +142,12 @@ class UnsafeArraySuite extends SparkFunSuite {
 
     val schema = new StructType().add("array", ArrayType(CalendarIntervalType))
     val encoder = RowEncoder(schema).resolveAndBind()
-    val externalRow = Row(calenderintervalArray)
+    val externalRow = Row(calendarintervalArray)
     val ir = encoder.createSerializer().apply(externalRow)
     val unsafeCalendar = ir.getArray(0)
     assert(unsafeCalendar.isInstanceOf[UnsafeArrayData])
-    assert(unsafeCalendar.numElements == calenderintervalArray.length)
-    calenderintervalArray.zipWithIndex.map { case (e, i) =>
+    assert(unsafeCalendar.numElements == calendarintervalArray.length)
+    calendarintervalArray.zipWithIndex.map { case (e, i) =>
       assert(unsafeCalendar.getInterval(i) == e)
     }
 
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java
index d8e61a8..b2ef1c7 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java
@@ -49,7 +49,7 @@ public class ColumnDescriptor {
 
   public static ColumnDescriptor newPrimitiveColumnDescriptor(String name, String comment, Type type, int position) {
     // Current usage looks like it's only for metadata columns, but if that changes then
-    // this method may need to require a type qualifiers aruments.
+    // this method may need to require a type qualifiers arguments.
     return new ColumnDescriptor(name, comment, new TypeDescriptor(type), position);
   }
 
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoValue.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoValue.java
index 2b2359c..bf3c6b2 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoValue.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoValue.java
@@ -52,7 +52,7 @@ public class GetInfoValue {
       stringValue = tGetInfoValue.getStringValue();
       break;
     default:
-      throw new IllegalArgumentException("Unreconigzed TGetInfoValue");
+      throw new IllegalArgumentException("Unrecognized TGetInfoValue");
     }
   }
 
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
index c25c742..5963067 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
@@ -103,7 +103,7 @@ public class GetColumnsOperation extends MetadataOperation {
       "Schema of table that is the scope of a reference attribute "
       + "(null if the DATA_TYPE isn't REF)")
   .addPrimitiveColumn("SCOPE_TABLE", Type.STRING_TYPE,
-      "Table name that this the scope of a reference attribure "
+      "Table name that this the scope of a reference attribute "
       + "(null if the DATA_TYPE isn't REF)")
   .addPrimitiveColumn("SOURCE_DATA_TYPE", Type.SMALLINT_TYPE,
       "Source type of a distinct type or user-generated Ref type, "
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index 1b3e8fe..f47a438 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -175,9 +175,9 @@ public class HiveSessionImpl implements HiveSession {
     @Override
     protected int processCmd(String cmd) {
       int rc = 0;
-      String cmd_trimed = cmd.trim();
+      String cmd_trimmed = cmd.trim();
       try {
-        executeStatementInternal(cmd_trimed, null, false, 0);
+        executeStatementInternal(cmd_trimmed, null, false, 0);
       } catch (HiveSQLException e) {
         rc = -1;
         LOG.warn("Failed to execute HQL command in global .hiverc file.", e);
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java
index ab9ed5b..13fc552 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java
@@ -137,7 +137,7 @@ public class ThriftHttpCLIService extends ThriftCLIService {
       httpServer.setHandler(context);
       context.addServlet(new ServletHolder(thriftHttpServlet), httpPath);
 
-      // TODO: check defaults: maxTimeout, keepalive, maxBodySize, bodyRecieveDuration, etc.
+      // TODO: check defaults: maxTimeout, keepalive, maxBodySize, bodyReceiveDuration, etc.
       // Finally, start the server
       httpServer.start();
       // In case HIVE_SERVER2_THRIFT_HTTP_PORT or hive.server2.thrift.http.port is configured with
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/DummyListeners.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/DummyListeners.scala
index 4564c22..820859b 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/DummyListeners.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/DummyListeners.scala
@@ -17,7 +17,7 @@
 
 /**
  * These classes in this package are intentionally placed to the outer package of spark,
- * because IsolatedClientLoader leverages Spark classloader for shared classess including
+ * because IsolatedClientLoader leverages Spark classloader for shared classes including
  * spark package, and the test should fail if Spark initializes these listeners with
  * IsolatedClientLoader.
  */
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala
index f28faea..f2bb337 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala
@@ -42,7 +42,7 @@ class SparkSQLEnvSuite extends SparkFunSuite {
       QUERY_EXECUTION_LISTENERS.key -> classOf[DummyQueryExecutionListener].getCanonicalName,
       STREAMING_QUERY_LISTENERS.key -> classOf[DummyStreamingQueryListener].getCanonicalName,
       WAREHOUSE_PATH.key -> TestHiveContext.makeWarehouseDir().toURI.getPath,
-      // The issue occured from "maven" and list of custom jars, but providing list of custom
+      // The issue occurred from "maven" and list of custom jars, but providing list of custom
       // jars to initialize HiveClient isn't trivial, so just use "maven".
       HIVE_METASTORE_JARS.key -> "maven",
       HIVE_METASTORE_VERSION.key -> null,
diff --git a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
index 4ce1964..c263932 100644
--- a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
+++ b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
@@ -56,7 +56,7 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
     TestHive.setConf(SQLConf.IN_MEMORY_PARTITION_PRUNING, true)
     // Ensures that cross joins are enabled so that we can test them
     TestHive.setConf(SQLConf.CROSS_JOINS_ENABLED, true)
-    // Ensures that the table insertion behaivor is consistent with Hive
+    // Ensures that the table insertion behavior is consistent with Hive
     TestHive.setConf(SQLConf.STORE_ASSIGNMENT_POLICY, StoreAssignmentPolicy.LEGACY.toString)
     // Fix session local timezone to America/Los_Angeles for those timezone sensitive tests
     // (timestamp_*)
@@ -305,7 +305,7 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
     // Unsupported underscore syntax.
     "inputddl5",
 
-    // Thift is broken...
+    // Thrift is broken...
     "inputddl8",
 
     // Hive changed ordering of ddl:
@@ -496,7 +496,7 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
     "drop_partitions_filter2",
     "drop_partitions_filter3",
 
-    // The following failes due to truncate table
+    // The following fails due to truncate table
     "truncate_table",
 
     // We do not support DFS command.
@@ -716,7 +716,7 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
     "groupby_multi_insert_common_distinct",
     "groupby_multi_single_reducer2",
     "groupby_multi_single_reducer3",
-    "groupby_mutli_insert_common_distinct",
+    "groupby_multi_insert_common_distinct",
     "groupby_neg_float",
     "groupby_ppd",
     "groupby_ppr",
@@ -958,8 +958,8 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
     "subq2",
     "subquery_exists",
     "subquery_exists_having",
-    "subquery_notexists",
-    "subquery_notexists_having",
+    "subquery_nonexistent",
+    "subquery_nonexistent_having",
     "subquery_in_having",
     "tablename_with_select",
     "timestamp_comparison",
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index a89243c..e02589e 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -332,7 +332,7 @@ private[hive] object HiveMetastoreCatalog {
       metastoreSchema: StructType,
       inferredSchema: StructType): StructType = try {
     // scalastyle:off caselocale
-    // Find any nullable fields in mestastore schema that are missing from the inferred schema.
+    // Find any nullable fields in metastore schema that are missing from the inferred schema.
     val metastoreFields = metastoreSchema.map(f => f.name.toLowerCase -> f).toMap
     val missingNullables = metastoreFields
       .filterKeys(!inferredSchema.map(_.name.toLowerCase).contains(_))
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index 34befb8..b4ebf15 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -367,14 +367,14 @@ private[hive] class HiveClientImpl(
 
   override def getDatabase(dbName: String): CatalogDatabase = withHiveState {
     Option(client.getDatabase(dbName)).map { d =>
-      val paras = Option(d.getParameters).map(_.asScala.toMap).getOrElse(Map()) ++
+      val params = Option(d.getParameters).map(_.asScala.toMap).getOrElse(Map()) ++
         Map(PROP_OWNER -> shim.getDatabaseOwnerName(d))
 
       CatalogDatabase(
         name = d.getName,
         description = Option(d.getDescription).getOrElse(""),
         locationUri = CatalogUtils.stringToURI(d.getLocationUri),
-        properties = paras)
+        properties = params)
     }.getOrElse(throw new NoSuchDatabaseException(dbName))
   }
 
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala
index 4096916..26baff3 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala
@@ -92,7 +92,7 @@ case class HiveScriptTransformationExec(
                 scriptOutputWritable.readFields(scriptOutputStream)
               } catch {
                 case _: EOFException =>
-                  // This means that the stdout of `proc` (ie. TRANSFORM process) has exhausted.
+                  // This means that the stdout of `proc` (i.e. TRANSFORM process) has exhausted.
                   // Ideally the proc should *not* be alive at this point but
                   // there can be a lag between EOF being written out and the process
                   // being terminated. So explicitly waiting for the process to be done.
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
index 3c3f31a..63e4688 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
@@ -172,7 +172,7 @@ case class InsertIntoHiveTable(
     table.bucketSpec match {
       case Some(bucketSpec) =>
         // Writes to bucketed hive tables are allowed only if user does not care about maintaining
-        // table's bucketing ie. both "hive.enforce.bucketing" and "hive.enforce.sorting" are
+        // table's bucketing i.e. both "hive.enforce.bucketing" and "hive.enforce.sorting" are
         // set to false
         val enforceBucketingConfig = "hive.enforce.bucketing"
         val enforceSortingConfig = "hive.enforce.sorting"
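
The two configs named in the context above gate inserts into bucketed Hive tables. The sketch below shows the knobs at the SQL level, assuming a Hive-enabled session; the table name is made up, and once both settings are off the insert proceeds while ignoring the declared bucketing.

    import org.apache.spark.sql.SparkSession

    object BucketedHiveInsertSketch extends App {
      val spark = SparkSession.builder().master("local[1]").appName("bucketed-insert-sketch")
        .enableHiveSupport().getOrCreate()
      spark.sql("CREATE TABLE bucketed_demo (key INT, value STRING) " +
        "CLUSTERED BY (key) INTO 2 BUCKETS STORED AS textfile")
      // With either setting left on, the insert is rejected because Spark does not write
      // Hive-compatible bucketed output; with both off, it proceeds and logs a warning.
      spark.sql("SET hive.enforce.bucketing=false")
      spark.sql("SET hive.enforce.sorting=false")
      spark.sql("INSERT INTO TABLE bucketed_demo VALUES (1, 'a')")
      spark.stop()
    }
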
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala
index cd07199..3fa8449 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala
@@ -34,7 +34,7 @@ import org.apache.spark.sql.execution.datasources.DataSourceStrategy
  * the hive table relation will be updated based on pruned partitions.
  *
  * This rule is executed in optimization phase, so the statistics can be updated before physical
- * planning, which is useful for some spark strategy, eg.
+ * planning, which is useful for some spark strategy, e.g.
  * [[org.apache.spark.sql.execution.SparkStrategies.JoinSelection]].
  *
  * TODO: merge this with PruneFileSourcePartitions after we completely make hive as a data source.
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
index 28bbc2d..df5334c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
@@ -19,7 +19,7 @@ set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.auto.convert.join=true;
 
--- A SMB join followed by a mutli-insert
+-- A SMB join followed by a multi-insert
 explain 
 from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
@@ -41,7 +41,7 @@ select * from dest2 order by k1, k2;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=200;
 
--- A SMB join followed by a mutli-insert
+-- A SMB join followed by a multi-insert
 explain 
 from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
@@ -61,7 +61,7 @@ select * from dest1 order by k1, k2;
 select * from dest2 order by k1, k2;
 
 set hive.auto.convert.sortmerge.join.to.mapjoin=true;
--- A SMB join followed by a mutli-insert
+-- A SMB join followed by a multi-insert
 explain 
 from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
index 91e97de..843ba4a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
@@ -18,7 +18,7 @@ FROM src
 INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *;
 
 -- Insert data into the bucketed table by selecting from another bucketed table
--- The bucketing positions dont match - although the actual bucketing do.
+-- The bucketing positions don't match - although the actual bucketing do.
 -- This should be a map-only operation
 EXPLAIN
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
@@ -37,7 +37,7 @@ CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
 
 -- Insert data into the bucketed table by selecting from another bucketed table
--- The bucketing positions dont match - this should be a map-reduce operation
+-- The bucketing positions don't match - this should be a map-reduce operation
 EXPLAIN
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT x.key, x.value from 
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_20.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
index f70e7d5..4c56cad 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
@@ -32,7 +32,7 @@ CREATE TABLE test_table3 (key STRING, value1 int, value2 string) PARTITIONED BY
 CLUSTERED BY (value1) SORTED BY (value1) INTO 2 BUCKETS;
 
 -- Insert data into the bucketed table by selecting from another bucketed table
--- This should be a map-only operation, although the bucketing positions dont match
+-- This should be a map-only operation, although the bucketing positions don't match
 EXPLAIN
 INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.value, a.key, a.value FROM test_table1 a WHERE a.ds = '1';
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala
index 71750e6..b715f48 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala
@@ -35,7 +35,7 @@ import org.apache.spark.util.Utils
 
 case class TestData(key: Int, value: String)
 
-case class ThreeCloumntable(key: Int, value: String, key1: String)
+case class ThreeColumnTable(key: Int, value: String, key1: String)
 
 class InsertSuite extends QueryTest with TestHiveSingleton with BeforeAndAfter
     with SQLTestUtils  with PrivateMethodTester  {
@@ -764,7 +764,7 @@ class InsertSuite extends QueryTest with TestHiveSingleton with BeforeAndAfter
       val path = dir.toURI.getPath
 
       val e = intercept[AnalysisException] {
-        sql(s"INSERT OVERWRITE LOCAL DIRECTORY '${path}' TABLE notexists")
+        sql(s"INSERT OVERWRITE LOCAL DIRECTORY '${path}' TABLE nonexistent")
       }.getMessage
       assert(e.contains("Table or view not found"))
     }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
index 2ea9894..2e98a76 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
@@ -735,7 +735,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
     }
   }
 
-  test("analyze column command paramaters validation") {
+  test("analyze column command parameters validation") {
     val e1 = intercept[IllegalArgumentException] {
       AnalyzeColumnCommand(TableIdentifier("test"), Option(Seq("c1")), true).run(spark)
     }
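
The renamed test validates the parameters of the column-statistics command. A minimal sketch of the two SQL forms it guards, using a throwaway table:

    import org.apache.spark.sql.SparkSession

    object AnalyzeColumnsSketch extends App {
      val spark = SparkSession.builder().master("local[1]").appName("analyze-columns-sketch").getOrCreate()
      spark.sql("CREATE TABLE stats_demo (c1 INT, c2 STRING) USING parquet")
      // Either name the columns explicitly ...
      spark.sql("ANALYZE TABLE stats_demo COMPUTE STATISTICS FOR COLUMNS c1, c2")
      // ... or ask for all of them; mixing the two is the parameter combination the test rejects.
      spark.sql("ANALYZE TABLE stats_demo COMPUTE STATISTICS FOR ALL COLUMNS")
      spark.stop()
    }
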
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationSuite.scala
index 1018ae5..0876709 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationSuite.scala
@@ -372,7 +372,7 @@ class HiveScriptTransformationSuite extends BaseScriptTransformationSuite with T
     }
   }
 
-  test("SPARK-32400: TRANSFORM doesn't support CalenderIntervalType/UserDefinedType (hive serde)") {
+  test("SPARK-32400: TRANSFORM doesn't support CalendarIntervalType/UserDefinedType (hive serde)") {
     assume(TestUtils.testCommandAvailable("/bin/bash"))
     withTempView("v") {
       val df = Seq(
@@ -410,7 +410,7 @@ class HiveScriptTransformationSuite extends BaseScriptTransformationSuite with T
   }
 
   test("SPARK-32400: TRANSFORM doesn't support" +
-    " CalenderIntervalType/UserDefinedType end to end (hive serde)") {
+    " CalendarIntervalType/UserDefinedType end to end (hive serde)") {
     assume(TestUtils.testCommandAvailable("/bin/bash"))
     withTempView("v") {
       val df = Seq(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 6b82b12..3370695 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -228,7 +228,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi
       checkAnswer(sql(s"SHOW functions $db.temp_abs"), Row("temp_abs"))
       checkAnswer(sql(s"SHOW functions `$db`.`temp_abs`"), Row("temp_abs"))
       checkAnswer(sql(s"SHOW functions `$db`.`temp_abs`"), Row("temp_abs"))
-      checkAnswer(sql("SHOW functions `a function doens't exist`"), Nil)
+      checkAnswer(sql("SHOW functions `a function doesn't exist`"), Nil)
       checkAnswer(sql("SHOW functions `temp_weekofyea*`"), Row("temp_weekofyear"))
 
       // this probably will failed if we add more function with `sha` prefixing.
@@ -768,7 +768,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi
           sql("SELECT * FROM nested").collect().toSeq)
 
         intercept[AnalysisException] {
-          sql("CREATE TABLE test_ctas_1234 AS SELECT * from notexists").collect()
+          sql("CREATE TABLE test_ctas_1234 AS SELECT * from nonexistent").collect()
         }
       }
     }
@@ -1739,12 +1739,12 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi
            |SELECT 'blarr'
          """.stripMargin)
 
-      // project list is the same order of paritioning columns in table definition
+      // project list is the same order of partitioning columns in table definition
       checkAnswer(
         sql(s"SELECT p1, p2, p3, p4, p5, c1 FROM $table"),
         Row("a", "b", "c", "d", "e", "blarr") :: Nil)
 
-      // project list does not have the same order of paritioning columns in table definition
+      // project list does not have the same order of partitioning columns in table definition
       checkAnswer(
         sql(s"SELECT p2, p3, p4, p1, p5, c1 FROM $table"),
         Row("b", "c", "d", "a", "e", "blarr") :: Nil)

