Posted to commits@spark.apache.org by hu...@apache.org on 2023/02/18 04:51:43 UTC

[spark] branch master updated: [SPARK-42470][SQL] Remove unused declarations from Hive module

This is an automated email from the ASF dual-hosted git repository.

huaxingao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 1389c9f5a93 [SPARK-42470][SQL] Remove unused declarations from Hive module
1389c9f5a93 is described below

commit 1389c9f5a932bb085c9589a6d5f1455e70d0d583
Author: yangjie01 <ya...@baidu.com>
AuthorDate: Fri Feb 17 20:51:24 2023 -0800

    [SPARK-42470][SQL] Remove unused declarations from Hive module
    
    ### What changes were proposed in this pull request?
    This PR cleans up unused declarations in the Hive module:
    
    - The `dataTypes` parameter of the `HiveInspectors#wrap` method: `dataTypes` was introduced by SPARK-9354, but since SPARK-17509 the implementation of `HiveInspectors#wrap` no longer needs it. With the parameter gone, `inputDataTypes` in `HiveSimpleUDF` also becomes unused, so this PR removes it as well (see the sketch after this list)
    
    - `UNLIMITED_DECIMAL_PRECISION` and `UNLIMITED_DECIMAL_SCALE` in `HiveShim`: these two `val`s were introduced by SPARK-6909 for unlimited decimals, but SPARK-9069 removed unlimited-precision support from `DecimalType`, and SPARK-14877 deleted `object HiveMetastoreTypes` in favor of `.catalogString`, so neither `val` is referenced anymore
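
    For illustration, a minimal sketch of the pattern after the change (a paraphrase of the diff below, not necessarily the exact Spark source): each entry in `wrappers` is built from the Hive ObjectInspector for its position, so the loop needs no parallel array of Catalyst types.

        def wrap(
            row: Seq[Any],
            wrappers: Array[(Any) => Any],
            cache: Array[AnyRef]): Array[AnyRef] = {
          var i = 0
          val length = wrappers.length
          while (i < length) {
            // Each wrapper already encodes the Hive-side conversion for
            // column i, which is why the dataTypes parameter could go.
            cache(i) = wrappers(i)(row(i)).asInstanceOf[AnyRef]
            i += 1
          }
          cache
        }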
    
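    For context on the second item, a hedged sketch (`DecimalType.SYSTEM_DEFAULT` and `catalogString` are real Spark API names; the snippet itself is illustrative): the removed constants duplicated the precision and scale that `DecimalType.SYSTEM_DEFAULT` already carries, and `.catalogString` renders the Hive-compatible type string directly.

        import org.apache.spark.sql.types.DecimalType

        // DecimalType.SYSTEM_DEFAULT is DecimalType(38, 18), the same values
        // the removed UNLIMITED_DECIMAL_PRECISION / UNLIMITED_DECIMAL_SCALE
        // constants hard-coded in HiveShim.
        val dt = DecimalType.SYSTEM_DEFAULT
        println(dt.catalogString)  // prints "decimal(38,18)"
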
    ### Why are the changes needed?
    Code clean up.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions
    
    Closes #40053 from LuciferYang/sql-hive-unused.
    
    Authored-by: yangjie01 <ya...@baidu.com>
    Signed-off-by: huaxingao <hu...@apple.com>
---
 .../src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala    | 3 +--
 sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala     | 4 ----
 sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala     | 5 +----
 3 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
index 9d8437b068d..8ff96fa63c2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
@@ -806,8 +806,7 @@ private[hive] trait HiveInspectors {
   def wrap(
       row: Seq[Any],
       wrappers: Array[(Any) => Any],
-      cache: Array[AnyRef],
-      dataTypes: Array[DataType]): Array[AnyRef] = {
+      cache: Array[AnyRef]): Array[AnyRef] = {
     var i = 0
     val length = wrappers.length
     while (i < length) {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala
index 351cde58427..6605d297010 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala
@@ -40,10 +40,6 @@ import org.apache.spark.sql.types.Decimal
 import org.apache.spark.util.Utils
 
 private[hive] object HiveShim {
-  // Precision and scale to pass for unlimited decimals; these are the same as the precision and
-  // scale Hive 0.13 infers for BigDecimals from sources that don't specify them (e.g. UDFs)
-  val UNLIMITED_DECIMAL_PRECISION = 38
-  val UNLIMITED_DECIMAL_SCALE = 18
   val HIVE_GENERIC_UDF_MACRO_CLS = "org.apache.hadoop.hive.ql.udf.generic.GenericUDFMacro"
 
   /*
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
index d5cff31ed64..67229d494a2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
@@ -91,12 +91,9 @@ private[hive] case class HiveSimpleUDF(
   @transient
   private lazy val cached: Array[AnyRef] = new Array[AnyRef](children.length)
 
-  @transient
-  private lazy val inputDataTypes: Array[DataType] = children.map(_.dataType).toArray
-
   // TODO: Finish input output types.
   override def eval(input: InternalRow): Any = {
-    val inputs = wrap(children.map(_.eval(input)), wrappers, cached, inputDataTypes)
+    val inputs = wrap(children.map(_.eval(input)), wrappers, cached)
     val ret = FunctionRegistry.invoke(
       method,
       function,

