Posted to commits@kyuubi.apache.org by ch...@apache.org on 2023/03/09 11:02:18 UTC
[kyuubi] branch master updated: [KYUUBI #4488] [KSHC] Keep object original name defined in HiveBridgeHelper
This is an automated email from the ASF dual-hosted git repository.
chengpan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kyuubi.git
The following commit(s) were added to refs/heads/master by this push:
new dd9b58ae8 [KYUUBI #4488] [KSHC] Keep object original name defined in HiveBridgeHelper
dd9b58ae8 is described below
commit dd9b58ae810e618ca1ea68bcb9000a4ce8adb0a3
Author: Cheng Pan <ch...@apache.org>
AuthorDate: Thu Mar 9 19:02:08 2023 +0800
[KYUUBI #4488] [KSHC] Keep object original name defined in HiveBridgeHelper
### _Why are the changes needed?_
Respect Java/Scala coding conventions in KSHC (the Kyuubi Spark Hive Connector).
When invoking a singleton (an `object` in Scala), write `AbcUtils.method(...)` rather than `abcUtils.method(...)`.
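For illustration, here is a minimal, self-contained Scala sketch of the aliasing pattern this commit standardizes. The `sparkStub` wrapper object and the `reference` method body are hypothetical stand-ins for the Spark internals, not the actual APIs:

```scala
// Hypothetical stand-in for a Spark-internal singleton such as
// org.apache.spark.sql.connector.expressions.LogicalExpressions.
object sparkStub {
  object LogicalExpressions {
    def reference(parts: Seq[String]): String = parts.mkString(".")
  }
}

object BridgeHelperSketch {
  // Before: a camelCase alias. Call sites such as
  // `logicalExpressions.reference(...)` read like method calls on a variable.
  val logicalExpressions: sparkStub.LogicalExpressions.type = sparkStub.LogicalExpressions

  // After: the alias keeps the object's original PascalCase name, so
  // `LogicalExpressions.reference(...)` reads as ordinary singleton access.
  // The right-hand side is fully qualified because importing the object
  // would clash with the val of the same name.
  val LogicalExpressions: sparkStub.LogicalExpressions.type = sparkStub.LogicalExpressions
}

object Demo extends App {
  // Both aliases point at the same singleton; only the second matches the convention.
  assert(BridgeHelperSketch.logicalExpressions eq BridgeHelperSketch.LogicalExpressions)
  println(BridgeHelperSketch.LogicalExpressions.reference(Seq("db", "col"))) // db.col
}
```

This mirrors the change to `HiveBridgeHelper.scala` in the diff below: the camelCase typed aliases (e.g. `val logicalExpressions: LogicalExpressions.type = LogicalExpressions`) are replaced by vals bearing the objects' original names, initialized from fully-qualified paths so that the now-shadowed imports can be dropped.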
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly, including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4488 from pan3793/shc-rename.
Closes #4488
ec9a80198 [Cheng Pan] nit
84d3bb413 [Cheng Pan] Keep object original name defined in HiveBridgeHelper
Authored-by: Cheng Pan <ch...@apache.org>
Signed-off-by: Cheng Pan <ch...@apache.org>
---
.../kyuubi/spark/connector/hive/HiveTable.scala | 4 ++--
.../spark/connector/hive/HiveTableCatalog.scala | 8 ++++----
.../connector/hive/read/FilePartitionReader.scala | 6 +++---
.../spark/connector/hive/read/HiveFileIndex.scala | 6 +++---
.../hive/read/HivePartitionedReader.scala | 6 +++---
.../spark/connector/hive/read/HiveReader.scala | 6 +++---
.../spark/connector/hive/read/HiveScan.scala | 4 ++--
.../connector/hive/write/FileWriterFactory.scala | 4 ++--
.../spark/connector/hive/write/HiveWrite.scala | 4 ++--
.../hive/kyuubi/connector/HiveBridgeHelper.scala | 23 +++++++++-------------
10 files changed, 33 insertions(+), 38 deletions(-)
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/HiveTable.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/HiveTable.scala
index 3d1d4f8c4..ee6d5fc23 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/HiveTable.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/HiveTable.scala
@@ -31,7 +31,7 @@ import org.apache.spark.sql.connector.catalog.TableCapability.{BATCH_READ, BATCH
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.connector.read.ScanBuilder
import org.apache.spark.sql.connector.write.{LogicalWriteInfo, WriteBuilder}
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{logicalExpressions, BucketSpecHelper}
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{BucketSpecHelper, LogicalExpressions}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
@@ -68,7 +68,7 @@ case class HiveTable(
override def partitioning: Array[Transform] = {
val partitions = new mutable.ArrayBuffer[Transform]()
catalogTable.partitionColumnNames.foreach { col =>
- partitions += logicalExpressions.identity(logicalExpressions.reference(Seq(col)))
+ partitions += LogicalExpressions.identity(LogicalExpressions.reference(Seq(col)))
}
catalogTable.bucketSpec.foreach { spec =>
partitions += spec.asTransform
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/HiveTableCatalog.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/HiveTableCatalog.scala
index 4d0f48e57..c4d71dbba 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/HiveTableCatalog.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/HiveTableCatalog.scala
@@ -38,7 +38,7 @@ import org.apache.spark.sql.connector.catalog.NamespaceChange.RemoveProperty
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.hive.HiveUDFExpressionBuilder
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{catalogV2Util, postExternalCatalogEvent, HiveMetastoreCatalog, HiveSessionCatalog}
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper._
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
@@ -198,8 +198,8 @@ class HiveTableCatalog(sparkSession: SparkSession)
throw new NoSuchTableException(ident)
}
- val properties = catalogV2Util.applyPropertiesChanges(catalogTable.properties, changes)
- val schema = catalogV2Util.applySchemaChanges(
+ val properties = CatalogV2Util.applyPropertiesChanges(catalogTable.properties, changes)
+ val schema = CatalogV2Util.applySchemaChanges(
catalogTable.schema,
changes)
val comment = properties.get(TableCatalog.PROP_COMMENT)
@@ -319,7 +319,7 @@ class HiveTableCatalog(sparkSession: SparkSession)
val metadata = catalog.getDatabaseMetadata(db).toMetadata
catalog.alterDatabase(
- toCatalogDatabase(db, catalogV2Util.applyNamespaceChanges(metadata, changes)))
+ toCatalogDatabase(db, CatalogV2Util.applyNamespaceChanges(metadata, changes)))
case _ =>
throw new NoSuchNamespaceException(namespace)
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/FilePartitionReader.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/FilePartitionReader.scala
index dc1fad27e..d0cd680d4 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/FilePartitionReader.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/FilePartitionReader.scala
@@ -23,7 +23,7 @@ import org.apache.parquet.io.ParquetDecodingException
import org.apache.spark.internal.Logging
import org.apache.spark.sql.connector.read.PartitionReader
import org.apache.spark.sql.execution.datasources.SchemaColumnConvertNotSupportedException
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.inputFileBlockHolder
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.InputFileBlockHolder
import org.apache.spark.sql.internal.SQLConf
// scalastyle:off line.size.limit
@@ -90,7 +90,7 @@ class FilePartitionReader[T](readers: Iterator[HivePartitionedFileReader[T]])
if (currentReader != null) {
currentReader.close()
}
- inputFileBlockHolder.unset()
+ InputFileBlockHolder.unset()
}
private def getNextReader(): HivePartitionedFileReader[T] = {
@@ -98,7 +98,7 @@ class FilePartitionReader[T](readers: Iterator[HivePartitionedFileReader[T]])
logInfo(s"Reading file $reader")
// Sets InputFileBlockHolder for the file block's information
val file = reader.file
- inputFileBlockHolder.set(file.filePath, file.start, file.length)
+ InputFileBlockHolder.set(file.filePath, file.start, file.length)
reader
}
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
index 768d7ccea..82199e6f2 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.catalyst.catalog.{CatalogTable, CatalogTablePartitio
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, BoundReference, Expression, Predicate}
import org.apache.spark.sql.connector.catalog.CatalogPlugin
import org.apache.spark.sql.execution.datasources._
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.hiveClientImpl
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.HiveClientImpl
import org.apache.spark.sql.types.StructType
import org.apache.kyuubi.spark.connector.hive.{HiveTableCatalog, KyuubiHiveConnectorException}
@@ -50,7 +50,7 @@ class HiveCatalogFileIndex(
private val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
- private lazy val hiveTable: Table = hiveClientImpl.toHiveTable(table)
+ private lazy val hiveTable: Table = HiveClientImpl.toHiveTable(table)
private val baseLocation: Option[URI] = table.storage.locationUri
@@ -111,7 +111,7 @@ class HiveCatalogFileIndex(
}
private def buildBindPartition(partition: CatalogTablePartition): BindPartition =
- BindPartition(partition, hiveClientImpl.toHivePartition(partition, hiveTable))
+ BindPartition(partition, HiveClientImpl.toHivePartition(partition, hiveTable))
override def partitionSpec(): PartitionSpec = {
throw notSupportOperator("partitionSpec")
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HivePartitionedReader.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HivePartitionedReader.scala
index 8270679c2..4c1690524 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HivePartitionedReader.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HivePartitionedReader.scala
@@ -33,7 +33,7 @@ import org.apache.spark.sql.catalyst.expressions.{Attribute, SpecificInternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.connector.read.PartitionReader
import org.apache.spark.sql.execution.datasources.PartitionedFile
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{hadoopTableReader, hiveShim}
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{HadoopTableReader, HiveShim}
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.SerializableConfiguration
@@ -108,7 +108,7 @@ case class HivePartitionedReader(
row.update(ordinal, UTF8String.fromString(oi.getPrimitiveJavaObject(value).getValue))
case oi: HiveDecimalObjectInspector =>
(value: Any, row: InternalRow, ordinal: Int) =>
- row.update(ordinal, hiveShim.toCatalystDecimal(oi, value))
+ row.update(ordinal, HiveShim.toCatalystDecimal(oi, value))
case oi: TimestampObjectInspector =>
(value: Any, row: InternalRow, ordinal: Int) =>
row.setLong(ordinal, DateTimeUtils.fromJavaTimestamp(oi.getPrimitiveJavaObject(value)))
@@ -120,7 +120,7 @@ case class HivePartitionedReader(
row.update(ordinal, oi.getPrimitiveJavaObject(value))
case oi =>
logDebug("HiveInspector class: " + oi.getClass.getName + ", charset: " + charset)
- val unwrapper = hadoopTableReader.unwrapperFor(oi)
+ val unwrapper = HadoopTableReader.unwrapperFor(oi)
(value: Any, row: InternalRow, ordinal: Int) => row(ordinal) = unwrapper(value)
}
}
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveReader.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveReader.scala
index 995c7e722..54f6e80c0 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveReader.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveReader.scala
@@ -30,7 +30,7 @@ import org.apache.hadoop.io.Writable
import org.apache.hadoop.mapred.{InputFormat, JobConf}
import org.apache.hadoop.util.ReflectionUtils
import org.apache.spark.sql.catalyst.expressions.AttributeReference
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{hiveShim, hiveTableUtil}
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{HiveShim, HiveTableUtil}
import org.apache.spark.sql.types.StructType
object HiveReader {
@@ -46,7 +46,7 @@ object HiveReader {
addColumnMetadataToConf(tableDesc, hiveConf, dataSchema, readDataSchema)
// Copy hive table properties to hiveConf. For example,
// initial job conf to read files with specified format
- hiveTableUtil.configureJobPropertiesForStorageHandler(tableDesc, hiveConf, false)
+ HiveTableUtil.configureJobPropertiesForStorageHandler(tableDesc, hiveConf, false)
}
private def addColumnMetadataToConf(
@@ -60,7 +60,7 @@ object HiveReader {
val neededColumnIDs =
readDataSchema.map(field => Integer.valueOf(dataSchema.fields.indexOf(field)))
- hiveShim.appendReadColumns(hiveConf, neededColumnIDs, neededColumnNames)
+ HiveShim.appendReadColumns(hiveConf, neededColumnIDs, neededColumnNames)
val deserializer = tableDesc.getDeserializerClass.newInstance
deserializer.initialize(hiveConf, tableDesc.getProperties)
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
index b52a69222..64fcf23f8 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
@@ -32,7 +32,7 @@ import org.apache.spark.sql.connector.read.PartitionReaderFactory
import org.apache.spark.sql.execution.PartitionedFileUtil
import org.apache.spark.sql.execution.datasources.{FilePartition, PartitionedFile}
import org.apache.spark.sql.execution.datasources.v2.FileScan
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.hiveClientImpl
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.HiveClientImpl
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.SerializableConfiguration
@@ -58,7 +58,7 @@ case class HiveScan(
val hiveConf = sparkSession.sessionState.newHadoopConf()
addCatalogTableConfToConf(hiveConf, catalogTable)
- val table = hiveClientImpl.toHiveTable(catalogTable)
+ val table = HiveClientImpl.toHiveTable(catalogTable)
HiveReader.initializeHiveConf(table, hiveConf, dataSchema, readDataSchema)
val broadcastHiveConf =
sparkSession.sparkContext.broadcast(new SerializableConfiguration(hiveConf))
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/FileWriterFactory.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/FileWriterFactory.scala
index 3b86d43c7..a58565d9d 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/FileWriterFactory.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/FileWriterFactory.scala
@@ -26,7 +26,7 @@ import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.write.{DataWriter, DataWriterFactory}
import org.apache.spark.sql.execution.datasources.{DynamicPartitionDataSingleWriter, SingleDirectoryDataWriter, WriteJobDescription}
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.sparkHadoopWriterUtils
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.SparkHadoopWriterUtils
/**
* This class is rewritten because of SPARK-42478, which affects Spark 3.3.2
@@ -39,7 +39,7 @@ case class FileWriterFactory(
// expected by Hadoop committers, but `JobId` cannot be serialized.
// thus, persist the serializable jobTrackerID in the class and make jobId a
// transient lazy val which recreates it each time to ensure jobId is unique.
- private[this] val jobTrackerID = sparkHadoopWriterUtils.createJobTrackerID(new Date)
+ private[this] val jobTrackerID = SparkHadoopWriterUtils.createJobTrackerID(new Date)
@transient private lazy val jobId = createJobID(jobTrackerID, 0)
override def createWriter(partitionId: Int, realTaskId: Long): DataWriter[InternalRow] = {
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/HiveWrite.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/HiveWrite.scala
index 2d72327b4..62db1fa0a 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/HiveWrite.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/HiveWrite.scala
@@ -38,7 +38,7 @@ import org.apache.spark.sql.execution.datasources.{BasicWriteJobStatsTracker, Wr
import org.apache.spark.sql.execution.datasources.v2.FileBatchWrite
import org.apache.spark.sql.execution.metric.SQLMetric
import org.apache.spark.sql.hive.execution.{HiveFileFormat, HiveOptions}
-import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{hiveClientImpl, FileSinkDesc, StructTypeHelper}
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.{FileSinkDesc, HiveClientImpl, StructTypeHelper}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.SerializableConfiguration
@@ -56,7 +56,7 @@ case class HiveWrite(
private val options = info.options()
- private val hiveTable = hiveClientImpl.toHiveTable(table)
+ private val hiveTable = HiveClientImpl.toHiveTable(table)
private val hadoopConf = hiveTableCatalog.hadoopConfiguration()
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/spark/sql/hive/kyuubi/connector/HiveBridgeHelper.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/spark/sql/hive/kyuubi/connector/HiveBridgeHelper.scala
index 1a11790d8..ce1e445fc 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/spark/sql/hive/kyuubi/connector/HiveBridgeHelper.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/spark/sql/hive/kyuubi/connector/HiveBridgeHelper.scala
@@ -20,16 +20,11 @@ package org.apache.spark.sql.hive.kyuubi.connector
import scala.collection.mutable
import org.apache.spark.SparkContext
-import org.apache.spark.internal.io.SparkHadoopWriterUtils
-import org.apache.spark.rdd.InputFileBlockHolder
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, ExternalCatalogEvent}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Literal}
import org.apache.spark.sql.catalyst.util.quoteIfNeeded
-import org.apache.spark.sql.connector.catalog.CatalogV2Util
-import org.apache.spark.sql.connector.expressions.{BucketTransform, FieldReference, IdentityTransform, LogicalExpressions, Transform}
+import org.apache.spark.sql.connector.expressions.{BucketTransform, FieldReference, IdentityTransform, Transform}
import org.apache.spark.sql.connector.expressions.LogicalExpressions.{bucket, reference}
-import org.apache.spark.sql.hive.{HadoopTableReader, HiveShim, HiveTableUtil}
-import org.apache.spark.sql.hive.client.HiveClientImpl
import org.apache.spark.sql.types.{DataType, DoubleType, FloatType, StructType}
object HiveBridgeHelper {
@@ -42,14 +37,14 @@ object HiveBridgeHelper {
type InsertIntoHiveTable = org.apache.spark.sql.hive.execution.InsertIntoHiveTable
val hive = org.apache.spark.sql.hive.client.hive
- val logicalExpressions: LogicalExpressions.type = LogicalExpressions
- val hiveClientImpl: HiveClientImpl.type = HiveClientImpl
- val sparkHadoopWriterUtils: SparkHadoopWriterUtils.type = SparkHadoopWriterUtils
- val catalogV2Util: CatalogV2Util.type = CatalogV2Util
- val hiveTableUtil: HiveTableUtil.type = HiveTableUtil
- val hiveShim: HiveShim.type = HiveShim
- val inputFileBlockHolder: InputFileBlockHolder.type = InputFileBlockHolder
- val hadoopTableReader: HadoopTableReader.type = HadoopTableReader
+ val LogicalExpressions = org.apache.spark.sql.connector.expressions.LogicalExpressions
+ val HiveClientImpl = org.apache.spark.sql.hive.client.HiveClientImpl
+ val SparkHadoopWriterUtils = org.apache.spark.internal.io.SparkHadoopWriterUtils
+ val CatalogV2Util = org.apache.spark.sql.connector.catalog.CatalogV2Util
+ val HiveTableUtil = org.apache.spark.sql.hive.HiveTableUtil
+ val HiveShim = org.apache.spark.sql.hive.HiveShim
+ val InputFileBlockHolder = org.apache.spark.rdd.InputFileBlockHolder
+ val HadoopTableReader = org.apache.spark.sql.hive.HadoopTableReader
def postExternalCatalogEvent(sc: SparkContext, event: ExternalCatalogEvent): Unit = {
sc.listenerBus.post(event)