Posted to commits@hudi.apache.org by yi...@apache.org on 2022/09/06 20:28:44 UTC

[hudi] branch master updated: [MINOR] Remove redundant braces (#6604)

This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 5fcec2fdc4 [MINOR] Remove redundant braces (#6604)
5fcec2fdc4 is described below

commit 5fcec2fdc4e7fbbbec4a01bcdf79b737655744f6
Author: felixYyu <fe...@live.cn>
AuthorDate: Wed Sep 7 04:28:33 2022 +0800

    [MINOR] Remove redundant braces (#6604)
---
 hudi-cli/src/main/scala/org/apache/hudi/cli/DedupeSparkJob.scala      | 2 +-
 .../main/scala/org/apache/spark/sql/hudi/HoodieSqlCommonUtils.scala   | 2 +-
 .../src/main/scala/org/apache/spark/sql/hudi/DedupeSparkJob.scala     | 2 +-
 .../spark/sql/hudi/command/procedures/RunClusteringProcedure.scala    | 4 ++--
 .../sql/hudi/command/procedures/ValidateHoodieSyncProcedure.scala     | 2 +-
 .../src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala   | 2 +-
 .../src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala     | 2 +-
 7 files changed, 8 insertions(+), 8 deletions(-)
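
For context on the change itself: in Scala string interpolation, the braces in "${tblName}" are redundant when the interpolated expression is a bare identifier, since "$tblName" desugars to exactly the same code. Braces remain necessary for anything beyond a simple identifier, such as an expression or a member selection. A minimal sketch of the rule (the values are hypothetical, chosen to mirror the DedupeSparkJob query in the diff below):

    object InterpolationDemo extends App {
      val tblName = "hudi_trips"   // hypothetical table name
      val dupeCnt = 3

      // Redundant braces: a bare identifier needs none.
      println(s"from ${tblName}")  // style before this commit
      println(s"from $tblName")    // style after: identical output

      // Braces are still required beyond a bare identifier:
      println(s"having dupe_cnt > ${dupeCnt - 2}")  // expression
      println(s"table: ${tblName.toUpperCase}")     // member selection
    }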

diff --git a/hudi-cli/src/main/scala/org/apache/hudi/cli/DedupeSparkJob.scala b/hudi-cli/src/main/scala/org/apache/hudi/cli/DedupeSparkJob.scala
index e471ed9258..cc9bd8ca2b 100644
--- a/hudi-cli/src/main/scala/org/apache/hudi/cli/DedupeSparkJob.scala
+++ b/hudi-cli/src/main/scala/org/apache/hudi/cli/DedupeSparkJob.scala
@@ -55,7 +55,7 @@ class DedupeSparkJob(basePath: String,
       s"""
       select  `${HoodieRecord.RECORD_KEY_METADATA_FIELD}` as dupe_key,
       count(*) as dupe_cnt
-      from ${tblName}
+      from $tblName
       group by `${HoodieRecord.RECORD_KEY_METADATA_FIELD}`
       having dupe_cnt > 1
       """
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/HoodieSqlCommonUtils.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/HoodieSqlCommonUtils.scala
index b02881bc3d..63186c0759 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/HoodieSqlCommonUtils.scala
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/HoodieSqlCommonUtils.scala
@@ -201,7 +201,7 @@ object HoodieSqlCommonUtils extends SparkAdapterSupport {
     val conf = sparkSession.sessionState.newHadoopConf()
     uri.map(makePathQualified(_, conf))
       .map(removePlaceHolder)
-      .getOrElse(throw new IllegalArgumentException(s"Missing location for ${identifier}"))
+      .getOrElse(throw new IllegalArgumentException(s"Missing location for $identifier"))
   }
 
   private def removePlaceHolder(path: String): String = {
diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/DedupeSparkJob.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/DedupeSparkJob.scala
index b6f610e7d7..e39d22aa05 100644
--- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/DedupeSparkJob.scala
+++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/DedupeSparkJob.scala
@@ -53,7 +53,7 @@ class DedupeSparkJob(basePath: String,
       s"""
       select  `${HoodieRecord.RECORD_KEY_METADATA_FIELD}` as dupe_key,
       count(*) as dupe_cnt
-      from ${tblName}
+      from $tblName
       group by `${HoodieRecord.RECORD_KEY_METADATA_FIELD}`
       having dupe_cnt > 1
       """
diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunClusteringProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunClusteringProcedure.scala
index 6c804e1190..18ea636c05 100644
--- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunClusteringProcedure.scala
+++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunClusteringProcedure.scala
@@ -82,7 +82,7 @@ class RunClusteringProcedure extends BaseProcedure
           HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME.key() -> "SELECTED_PARTITIONS",
           HoodieClusteringConfig.PARTITION_SELECTED.key() -> prunedPartitions
         )
-        logInfo(s"Partition predicates: ${p}, partition selected: ${prunedPartitions}")
+        logInfo(s"Partition predicates: $p, partition selected: $prunedPartitions")
       case _ =>
         logInfo("No partition predicates")
     }
@@ -94,7 +94,7 @@ class RunClusteringProcedure extends BaseProcedure
         conf = conf ++ Map(
           HoodieClusteringConfig.PLAN_STRATEGY_SORT_COLUMNS.key() -> o.asInstanceOf[String]
         )
-        logInfo(s"Order columns: ${o}")
+        logInfo(s"Order columns: $o")
       case _ =>
         logInfo("No order columns")
     }
diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ValidateHoodieSyncProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ValidateHoodieSyncProcedure.scala
index 67b752b5f6..77dd4f3ee0 100644
--- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ValidateHoodieSyncProcedure.scala
+++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ValidateHoodieSyncProcedure.scala
@@ -95,7 +95,7 @@ class ValidateHoodieSyncProcedure extends BaseProcedure with ProcedureBuilder wi
       sourceCount = countRecords(hiveServerUrl, srcMetaClient, srcDb, partitionCount, hiveUser, hivePass)
       targetCount = countRecords(hiveServerUrl, targetMetaClient, tgtDb, partitionCount, hiveUser, hivePass)
     } else {
-      logError(s"Unsupport mode ${mode}")
+      logError(s"Unsupport mode $mode")
     }
 
     val targetLatestCommit =
diff --git a/hudi-spark-datasource/hudi-spark2/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala b/hudi-spark-datasource/hudi-spark2/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala
index 385577dd30..9725fb63f5 100644
--- a/hudi-spark-datasource/hudi-spark2/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala
+++ b/hudi-spark-datasource/hudi-spark2/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala
@@ -111,7 +111,7 @@ class AvroDeserializer(rootAvroType: Schema, rootCatalystType: DataType) {
           // the value is processed as timestamp type with millisecond precision.
           updater.setLong(ordinal, value.asInstanceOf[Long] * 1000)
         case other => throw new IncompatibleSchemaException(
-          s"Cannot convert Avro logical type ${other} to Catalyst Timestamp type.")
+          s"Cannot convert Avro logical type $other to Catalyst Timestamp type.")
       }
 
       // Before we upgrade Avro to 1.8 for logical type support, spark-avro converts Long to Date.
diff --git a/hudi-spark-datasource/hudi-spark2/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala b/hudi-spark-datasource/hudi-spark2/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala
index 2673088f4f..2b88be8165 100644
--- a/hudi-spark-datasource/hudi-spark2/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala
+++ b/hudi-spark-datasource/hudi-spark2/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala
@@ -149,7 +149,7 @@ class AvroSerializer(rootCatalystType: DataType, rootAvroType: Schema, nullable:
         // output the timestamp value as with millisecond precision.
         case null => (getter, ordinal) => getter.getLong(ordinal) / 1000
         case other => throw new IncompatibleSchemaException(
-          s"Cannot convert Catalyst Timestamp type to Avro logical type ${other}")
+          s"Cannot convert Catalyst Timestamp type to Avro logical type $other")
       }
 
       case (ArrayType(et, containsNull), ARRAY) =>
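
A side note on the two spark2 Avro hunks above, based on their surrounding context lines rather than this commit's change: Catalyst's TimestampType carries microseconds since the epoch, while the Avro longs handled there carry millisecond precision, so the deserializer multiplies by 1000 and the serializer divides by 1000. A minimal sketch of that round trip, with a hypothetical value:

    // Catalyst TimestampType stores microseconds since the epoch;
    // the Avro long here carries milliseconds.
    val avroMillis: Long = 1662496113000L           // hypothetical input
    val catalystMicros: Long = avroMillis * 1000    // deserializer direction
    val backToMillis: Long = catalystMicros / 1000  // serializer direction
    assert(backToMillis == avroMillis)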