You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kyuubi.apache.org by ya...@apache.org on 2021/12/23 03:38:43 UTC

[incubator-kyuubi] branch master updated: [KYUUBI #1592] [TEST][ICEBERG][DELTA] Introduce row level operation test for data lake format

This is an automated email from the ASF dual-hosted git repository.

yao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-kyuubi.git


The following commit(s) were added to refs/heads/master by this push:
     new 152e394  [KYUUBI #1592] [TEST][ICEBERG][DELTA] Introduce row level operation test for data lake format
152e394 is described below

commit 152e39401690a15f9f0a744663f8f52bfc8ee5f1
Author: Cheng Pan <ch...@apache.org>
AuthorDate: Thu Dec 23 11:38:34 2021 +0800

    [KYUUBI #1592] [TEST][ICEBERG][DELTA] Introduce row level operation test for data lake format
    
    ### _Why are the changes needed?_
    
    Introduce row level operation test for data lake format and remove redundant tests
    
    ### _How was this patch tested?_
    - [x] Add some test cases that check the changes thoroughly including negative and positive cases if possible
    
    - [ ] Add screenshots for manual tests if appropriate
    
    - [x] [Run test](https://kyuubi.readthedocs.io/en/latest/develop_tools/testing.html#running-tests) locally before making a pull request
    
    Closes #1592 from pan3793/test.
    
    Closes #1592
    
    892feb8a [Cheng Pan] Simplify test
    72fd0939 [Cheng Pan] Fix import
    9d208392 [Cheng Pan] Add row level operation tests for Iceberg and Delta
    7730b4df [Cheng Pan] Also test JDBC connection MetaData in Kyuubi server
    7a41dfdf [Cheng Pan] [TEST] Remove redundant type info test in DeltaMetadataTests
    
    Authored-by: Cheng Pan <ch...@apache.org>
    Signed-off-by: Kent Yao <ya...@apache.org>
---
 .../spark/operation/SparkDeltaOperationSuite.scala |   6 +-
 .../operation/SparkIcebergOperationSuite.scala     |   6 +-
 .../spark/operation/SparkOperationSuite.scala      | 191 +--------------------
 .../kyuubi/operation/DeltaMetadataTests.scala      | 158 -----------------
 .../kyuubi/operation/RowLevelOperationTests.scala  | 113 ++++++++++++
 .../kyuubi/operation/SparkMetadataTests.scala      | 189 ++++++++++++++++++++
 6 files changed, 312 insertions(+), 351 deletions(-)

diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkDeltaOperationSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkDeltaOperationSuite.scala
index d07eca4..449cbe0 100644
--- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkDeltaOperationSuite.scala
+++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkDeltaOperationSuite.scala
@@ -18,11 +18,13 @@
 package org.apache.kyuubi.engine.spark.operation
 
 import org.apache.kyuubi.engine.spark.WithSparkSQLEngine
-import org.apache.kyuubi.operation.DeltaMetadataTests
+import org.apache.kyuubi.operation.{DeltaMetadataTests, RowLevelOperationTests}
 import org.apache.kyuubi.tags.DeltaTest
 
 @DeltaTest
-class SparkDeltaOperationSuite extends WithSparkSQLEngine with DeltaMetadataTests {
+class SparkDeltaOperationSuite extends WithSparkSQLEngine
+  with DeltaMetadataTests
+  with RowLevelOperationTests {
 
   override protected def jdbcUrl: String = getJdbcUrl
 
diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkIcebergOperationSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkIcebergOperationSuite.scala
index 225fa0a..0fbe762 100644
--- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkIcebergOperationSuite.scala
+++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkIcebergOperationSuite.scala
@@ -18,11 +18,13 @@
 package org.apache.kyuubi.engine.spark.operation
 
 import org.apache.kyuubi.engine.spark.WithSparkSQLEngine
-import org.apache.kyuubi.operation.IcebergMetadataTests
+import org.apache.kyuubi.operation.{IcebergMetadataTests, RowLevelOperationTests}
 import org.apache.kyuubi.tags.IcebergTest
 
 @IcebergTest
-class SparkIcebergOperationSuite extends WithSparkSQLEngine with IcebergMetadataTests {
+class SparkIcebergOperationSuite extends WithSparkSQLEngine
+  with IcebergMetadataTests
+  with RowLevelOperationTests {
 
   override protected def jdbcUrl: String = getJdbcUrl
 
diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkOperationSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkOperationSuite.scala
index bf42c1b..e28ae53 100644
--- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkOperationSuite.scala
+++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkOperationSuite.scala
@@ -17,8 +17,6 @@
 
 package org.apache.kyuubi.engine.spark.operation
 
-import java.sql.{DatabaseMetaData, ResultSet, SQLException, SQLFeatureNotSupportedException}
-
 import scala.collection.JavaConverters._
 import scala.util.Random
 
@@ -32,7 +30,6 @@ import org.apache.spark.kyuubi.SparkContextHelper
 import org.apache.spark.sql.catalyst.analysis.FunctionRegistry
 import org.apache.spark.sql.types._
 
-import org.apache.kyuubi.Utils
 import org.apache.kyuubi.engine.spark.WithSparkSQLEngine
 import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim
 import org.apache.kyuubi.operation.{HiveMetadataTests, SparkQueryTests}
@@ -240,7 +237,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with
     }
   }
 
-  test("execute statement -  select decimal") {
+  test("execute statement - select decimal") {
     withJdbcStatement() { statement =>
       val resultSet = statement.executeQuery("SELECT 1.2BD as col1, 1.23BD AS col2")
       assert(resultSet.next())
@@ -256,7 +253,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with
     }
   }
 
-  test("execute statement -  select column name with dots") {
+  test("execute statement - select column name with dots") {
     withJdbcStatement() { statement =>
       val resultSet = statement.executeQuery("select 'tmp.hello'")
       assert(resultSet.next())
@@ -305,190 +302,6 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with
     }
   }
 
-  test("Hive JDBC Database MetaData API Auditing") {
-    withJdbcStatement() { statement =>
-      val metaData = statement.getConnection.getMetaData
-      Seq(
-        () => metaData.allProceduresAreCallable(),
-        () => metaData.getURL,
-        () => metaData.getUserName,
-        () => metaData.isReadOnly,
-        () => metaData.nullsAreSortedHigh,
-        () => metaData.nullsAreSortedLow,
-        () => metaData.nullsAreSortedAtStart(),
-        () => metaData.nullsAreSortedAtEnd(),
-        () => metaData.usesLocalFiles(),
-        () => metaData.usesLocalFilePerTable(),
-        () => metaData.supportsMixedCaseIdentifiers(),
-        () => metaData.supportsMixedCaseQuotedIdentifiers(),
-        () => metaData.storesUpperCaseIdentifiers(),
-        () => metaData.storesUpperCaseQuotedIdentifiers(),
-        () => metaData.storesLowerCaseIdentifiers(),
-        () => metaData.storesLowerCaseQuotedIdentifiers(),
-        () => metaData.storesMixedCaseIdentifiers(),
-        () => metaData.storesMixedCaseQuotedIdentifiers(),
-        () => metaData.getSQLKeywords,
-        () => metaData.nullPlusNonNullIsNull,
-        () => metaData.supportsConvert,
-        () => metaData.supportsTableCorrelationNames,
-        () => metaData.supportsDifferentTableCorrelationNames,
-        () => metaData.supportsExpressionsInOrderBy(),
-        () => metaData.supportsOrderByUnrelated,
-        () => metaData.supportsGroupByUnrelated,
-        () => metaData.supportsGroupByBeyondSelect,
-        () => metaData.supportsLikeEscapeClause,
-        () => metaData.supportsMultipleTransactions,
-        () => metaData.supportsMinimumSQLGrammar,
-        () => metaData.supportsCoreSQLGrammar,
-        () => metaData.supportsExtendedSQLGrammar,
-        () => metaData.supportsANSI92EntryLevelSQL,
-        () => metaData.supportsANSI92IntermediateSQL,
-        () => metaData.supportsANSI92FullSQL,
-        () => metaData.supportsIntegrityEnhancementFacility,
-        () => metaData.isCatalogAtStart,
-        () => metaData.supportsSubqueriesInComparisons,
-        () => metaData.supportsSubqueriesInExists,
-        () => metaData.supportsSubqueriesInIns,
-        () => metaData.supportsSubqueriesInQuantifieds,
-        // Spark support this, see https://issues.apache.org/jira/browse/SPARK-18455
-        () => metaData.supportsCorrelatedSubqueries,
-        () => metaData.supportsOpenCursorsAcrossCommit,
-        () => metaData.supportsOpenCursorsAcrossRollback,
-        () => metaData.supportsOpenStatementsAcrossCommit,
-        () => metaData.supportsOpenStatementsAcrossRollback,
-        () => metaData.getMaxBinaryLiteralLength,
-        () => metaData.getMaxCharLiteralLength,
-        () => metaData.getMaxColumnsInGroupBy,
-        () => metaData.getMaxColumnsInIndex,
-        () => metaData.getMaxColumnsInOrderBy,
-        () => metaData.getMaxColumnsInSelect,
-        () => metaData.getMaxColumnsInTable,
-        () => metaData.getMaxConnections,
-        () => metaData.getMaxCursorNameLength,
-        () => metaData.getMaxIndexLength,
-        () => metaData.getMaxSchemaNameLength,
-        () => metaData.getMaxProcedureNameLength,
-        () => metaData.getMaxCatalogNameLength,
-        () => metaData.getMaxRowSize,
-        () => metaData.doesMaxRowSizeIncludeBlobs,
-        () => metaData.getMaxStatementLength,
-        () => metaData.getMaxStatements,
-        () => metaData.getMaxTableNameLength,
-        () => metaData.getMaxTablesInSelect,
-        () => metaData.getMaxUserNameLength,
-        () => metaData.supportsTransactionIsolationLevel(1),
-        () => metaData.supportsDataDefinitionAndDataManipulationTransactions,
-        () => metaData.supportsDataManipulationTransactionsOnly,
-        () => metaData.dataDefinitionCausesTransactionCommit,
-        () => metaData.dataDefinitionIgnoredInTransactions,
-        () => metaData.getColumnPrivileges("", "%", "%", "%"),
-        () => metaData.getTablePrivileges("", "%", "%"),
-        () => metaData.getBestRowIdentifier("", "%", "%", 0, true),
-        () => metaData.getVersionColumns("", "%", "%"),
-        () => metaData.getExportedKeys("", "default", ""),
-        () => metaData.supportsResultSetConcurrency(ResultSet.TYPE_FORWARD_ONLY, 2),
-        () => metaData.ownUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.ownDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.ownInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.othersUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.othersDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.othersInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.updatesAreDetected(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.deletesAreDetected(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.insertsAreDetected(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.supportsNamedParameters(),
-        () => metaData.supportsMultipleOpenResults,
-        () => metaData.supportsGetGeneratedKeys,
-        () => metaData.getSuperTypes("", "%", "%"),
-        () => metaData.getSuperTables("", "%", "%"),
-        () => metaData.getAttributes("", "%", "%", "%"),
-        () => metaData.getResultSetHoldability,
-        () => metaData.locatorsUpdateCopy,
-        () => metaData.supportsStatementPooling,
-        () => metaData.getRowIdLifetime,
-        () => metaData.supportsStoredFunctionsUsingCallSyntax,
-        () => metaData.autoCommitFailureClosesAllResultSets,
-        () => metaData.getClientInfoProperties,
-        () => metaData.getFunctionColumns("", "%", "%", "%"),
-        () => metaData.getPseudoColumns("", "%", "%", "%"),
-        () => metaData.generatedKeyAlwaysReturned).foreach { func =>
-        val e = intercept[SQLFeatureNotSupportedException](func())
-        assert(e.getMessage === "Method not supported")
-      }
-
-      import org.apache.kyuubi.KYUUBI_VERSION
-      assert(metaData.allTablesAreSelectable)
-      assert(metaData.getDatabaseProductName === "Apache Kyuubi (Incubating)")
-      assert(metaData.getDatabaseProductVersion === KYUUBI_VERSION)
-      assert(metaData.getDriverName === "Kyuubi Project Hive JDBC Shaded Client")
-      assert(metaData.getDriverVersion === KYUUBI_VERSION)
-      assert(metaData.getDatabaseMajorVersion === Utils.majorVersion(KYUUBI_VERSION))
-      assert(metaData.getDatabaseMinorVersion === Utils.minorVersion(KYUUBI_VERSION))
-      assert(
-        metaData.getIdentifierQuoteString === " ",
-        "This method returns a space \" \" if identifier quoting is not supported")
-      assert(metaData.getNumericFunctions === "")
-      assert(metaData.getStringFunctions === "")
-      assert(metaData.getSystemFunctions === "")
-      assert(metaData.getTimeDateFunctions === "")
-      assert(metaData.getSearchStringEscape === "\\")
-      assert(metaData.getExtraNameCharacters === "")
-      assert(metaData.supportsAlterTableWithAddColumn())
-      assert(!metaData.supportsAlterTableWithDropColumn())
-      assert(metaData.supportsColumnAliasing())
-      assert(metaData.supportsGroupBy)
-      assert(!metaData.supportsMultipleResultSets)
-      assert(!metaData.supportsNonNullableColumns)
-      assert(metaData.supportsOuterJoins)
-      assert(metaData.supportsFullOuterJoins)
-      assert(metaData.supportsLimitedOuterJoins)
-      assert(metaData.getSchemaTerm === "database")
-      assert(metaData.getProcedureTerm === "UDF")
-      assert(metaData.getCatalogTerm === "instance")
-      assert(metaData.getCatalogSeparator === ".")
-      assert(metaData.supportsSchemasInDataManipulation)
-      assert(!metaData.supportsSchemasInProcedureCalls)
-      assert(metaData.supportsSchemasInTableDefinitions)
-      assert(!metaData.supportsSchemasInIndexDefinitions)
-      assert(!metaData.supportsSchemasInPrivilegeDefinitions)
-      // This is actually supported, but hive jdbc package return false
-      assert(!metaData.supportsCatalogsInDataManipulation)
-      assert(!metaData.supportsCatalogsInProcedureCalls)
-      // This is actually supported, but hive jdbc package return false
-      assert(!metaData.supportsCatalogsInTableDefinitions)
-      assert(!metaData.supportsCatalogsInIndexDefinitions)
-      assert(!metaData.supportsCatalogsInPrivilegeDefinitions)
-      assert(!metaData.supportsPositionedDelete)
-      assert(!metaData.supportsPositionedUpdate)
-      assert(!metaData.supportsSelectForUpdate)
-      assert(!metaData.supportsStoredProcedures)
-      // This is actually supported, but hive jdbc package return false
-      assert(!metaData.supportsUnion)
-      assert(metaData.supportsUnionAll)
-      assert(metaData.getMaxColumnNameLength === 128)
-      assert(metaData.getDefaultTransactionIsolation === java.sql.Connection.TRANSACTION_NONE)
-      assert(!metaData.supportsTransactions)
-      assert(!metaData.getProcedureColumns("", "%", "%", "%").next())
-      intercept[SQLException](metaData.getPrimaryKeys("", "default", ""))
-      assert(!metaData.getImportedKeys("", "default", "").next())
-      intercept[SQLException] {
-        metaData.getCrossReference("", "default", "src", "", "default", "src2")
-      }
-      assert(!metaData.getIndexInfo("", "default", "src", true, true).next())
-
-      assert(metaData.supportsResultSetType(new Random().nextInt()))
-      assert(!metaData.supportsBatchUpdates)
-      assert(!metaData.getUDTs(",", "%", "%", null).next())
-      assert(!metaData.supportsSavepoints)
-      assert(!metaData.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT))
-      assert(metaData.getJDBCMajorVersion === 3)
-      assert(metaData.getJDBCMinorVersion === 0)
-      assert(metaData.getSQLStateType === DatabaseMetaData.sqlStateSQL)
-      assert(metaData.getMaxLogicalLobSize === 0)
-      assert(!metaData.supportsRefCursors)
-    }
-  }
-
   test("get operation status") {
     val sql = "select date_sub(date'2011-11-11', '1')"
 
diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/DeltaMetadataTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/DeltaMetadataTests.scala
index d44f223..8dbf401 100644
--- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/DeltaMetadataTests.scala
+++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/DeltaMetadataTests.scala
@@ -87,162 +87,4 @@ trait DeltaMetadataTests extends HiveJDBCTestHelper with DeltaSuiteMixin {
       assert(!rs3.next())
     }
   }
-
-  test("get type info") {
-    withJdbcStatement() { statement =>
-      val typeInfo = statement.getConnection.getMetaData.getTypeInfo
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "VOID")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.NULL)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "BOOLEAN")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.BOOLEAN)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "TINYINT")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.TINYINT)
-      assert(typeInfo.getInt(PRECISION) === 3)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 10)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "SMALLINT")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.SMALLINT)
-      assert(typeInfo.getInt(PRECISION) === 5)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 10)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "INTEGER")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.INTEGER)
-      assert(typeInfo.getInt(PRECISION) === 10)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 10)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "BIGINT")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.BIGINT)
-      assert(typeInfo.getInt(PRECISION) === 19)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 10)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "FLOAT")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.FLOAT)
-      assert(typeInfo.getInt(PRECISION) === 7)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 10)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "DOUBLE")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.DOUBLE)
-      assert(typeInfo.getInt(PRECISION) === 15)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 10)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "STRING")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.VARCHAR)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "BINARY")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.BINARY)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "DECIMAL")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.DECIMAL)
-      assert(typeInfo.getInt(PRECISION) === 38)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 10)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "DATE")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.DATE)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "TIMESTAMP")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.TIMESTAMP)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 3)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "ARRAY")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.ARRAY)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 0)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "MAP")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.JAVA_OBJECT)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 0)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "STRUCT")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.STRUCT)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 0)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-
-      typeInfo.next()
-      assert(typeInfo.getString(TYPE_NAME) === "INTERVAL")
-      assert(typeInfo.getInt(DATA_TYPE) === java.sql.Types.OTHER)
-      assert(typeInfo.getInt(PRECISION) === 0)
-      assert(typeInfo.getShort(NULLABLE) === 1)
-      assert(!typeInfo.getBoolean(CASE_SENSITIVE))
-      assert(typeInfo.getShort(SEARCHABLE) === 0)
-      assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
-    }
-  }
 }
diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/RowLevelOperationTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/RowLevelOperationTests.scala
new file mode 100644
index 0000000..ec2c70f
--- /dev/null
+++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/RowLevelOperationTests.scala
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.operation
+
+import java.sql.Statement
+
+import org.apache.kyuubi.DataLakeSuiteMixin
+
+trait RowLevelOperationTests extends HiveJDBCTestHelper with DataLakeSuiteMixin {
+
+  private def createAndInitTable(
+      stmt: Statement,
+      tableName: String)(records: => Seq[(Int, String)]): Unit = {
+    stmt.execute(
+      s"""CREATE TABLE $tableName (
+         |  id   INT,
+         |  city STRING
+         |) USING $format
+         |""".stripMargin)
+    stmt.execute(
+      s"""INSERT INTO $tableName VALUES
+         |${records.map(r => s"(${r._1}, '${r._2}')").mkString(",\n")}
+         |""".stripMargin)
+  }
+
+  test("update operation") {
+    val testTbl = s"${format}_update"
+    withJdbcStatement(testTbl) { stmt =>
+      createAndInitTable(stmt, testTbl) {
+        (1, "HangZhou") :: (2, "Seattle") :: (3, "Beijing") :: Nil
+      }
+      stmt.execute(s"UPDATE $testTbl SET city = 'Shanghai' WHERE id IN (1)")
+      stmt.execute(s"UPDATE $testTbl SET id = -1 WHERE city = 'Seattle'")
+
+      val rs1 = stmt.executeQuery(s"SELECT * FROM $testTbl ORDER BY id")
+      assert(rs1.next())
+      assert(rs1.getInt("id") === -1)
+      assert(rs1.getString("city") === "Seattle")
+      assert(rs1.next())
+      assert(rs1.getInt("id") === 1)
+      assert(rs1.getString("city") === "Shanghai")
+      assert(rs1.next())
+      assert(rs1.getInt("id") === 3)
+      assert(rs1.getString("city") === "Beijing")
+      assert(!rs1.next())
+    }
+  }
+
+  test("delete operation") {
+    val testTbl = s"${format}_delete"
+    withJdbcStatement(testTbl) { stmt =>
+      createAndInitTable(stmt, testTbl) {
+        (1, "HangZhou") :: (2, "Seattle") :: (3, "Beijing") :: Nil
+      }
+      stmt.execute(s"DELETE FROM $testTbl WHERE WHERE id = 1")
+      stmt.execute(s"DELETE FROM $testTbl WHERE WHERE city = 'Seattle'")
+
+      val rs1 = stmt.executeQuery(s"SELECT * FROM $testTbl ORDER BY id")
+      assert(rs1.next())
+      assert(rs1.getInt("id") === 3)
+      assert(rs1.getString("city") === "Beijing")
+      assert(!rs1.next())
+    }
+  }
+
+  test("merge into operation") {
+    val testTblBase = s"${format}_merge_into_base"
+    val testTblDelta = s"${format}_merge_into_delta"
+    withJdbcStatement(testTblBase, testTblDelta) { stmt =>
+      createAndInitTable(stmt, testTblBase) {
+        (1, "HangZhou") :: (2, "Seattle") :: (3, "Beijing") :: Nil
+      }
+      createAndInitTable(stmt, testTblDelta) {
+        (2, "Chicago") :: (3, "HongKong") :: (4, "London") :: Nil
+      }
+      stmt.execute(
+        s"""MERGE INTO $testTblBase t
+           |USING (SELECT * FROM $testTblDelta) s
+           |ON t.id = s.id
+           |WHEN MATCHED AND t.id = 2 THEN UPDATE SET *
+           |WHEN MATCHED AND t.city = 'Beijing' THEN DELETE
+           |WHEN NOT MATCHED THEN INSERT *
+           |""".stripMargin)
+
+      val rs1 = stmt.executeQuery(s"SELECT * FROM $testTblBase ORDER BY id")
+      assert(rs1.next())
+      assert(rs1.getInt("id") === 1)
+      assert(rs1.getString("city") === "HangZhou")
+      assert(rs1.next())
+      assert(rs1.getInt("id") === 2)
+      assert(rs1.getString("city") === "Chicago")
+      assert(rs1.next())
+      assert(rs1.getInt("id") === 4)
+      assert(rs1.getString("city") === "London")
+      assert(!rs1.next())
+    }
+  }
+}
diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkMetadataTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkMetadataTests.scala
index 97099ce..f61df8c 100644
--- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkMetadataTests.scala
+++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkMetadataTests.scala
@@ -17,6 +17,12 @@
 
 package org.apache.kyuubi.operation
 
+import java.sql.{DatabaseMetaData, ResultSet, SQLException, SQLFeatureNotSupportedException}
+
+import scala.util.Random
+
+import org.apache.kyuubi.KYUUBI_VERSION
+import org.apache.kyuubi.Utils
 import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._
 
 // For both `in-memory` and `hive` external catalog
@@ -287,4 +293,187 @@ trait SparkMetadataTests extends HiveJDBCTestHelper {
       assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
     }
   }
+
+  test("audit Kyuubi Hive JDBC connection MetaData") {
+    withJdbcStatement() { statement =>
+      val metaData = statement.getConnection.getMetaData
+      Seq(
+        () => metaData.allProceduresAreCallable(),
+        () => metaData.getURL,
+        () => metaData.getUserName,
+        () => metaData.isReadOnly,
+        () => metaData.nullsAreSortedHigh,
+        () => metaData.nullsAreSortedLow,
+        () => metaData.nullsAreSortedAtStart(),
+        () => metaData.nullsAreSortedAtEnd(),
+        () => metaData.usesLocalFiles(),
+        () => metaData.usesLocalFilePerTable(),
+        () => metaData.supportsMixedCaseIdentifiers(),
+        () => metaData.supportsMixedCaseQuotedIdentifiers(),
+        () => metaData.storesUpperCaseIdentifiers(),
+        () => metaData.storesUpperCaseQuotedIdentifiers(),
+        () => metaData.storesLowerCaseIdentifiers(),
+        () => metaData.storesLowerCaseQuotedIdentifiers(),
+        () => metaData.storesMixedCaseIdentifiers(),
+        () => metaData.storesMixedCaseQuotedIdentifiers(),
+        () => metaData.getSQLKeywords,
+        () => metaData.nullPlusNonNullIsNull,
+        () => metaData.supportsConvert,
+        () => metaData.supportsTableCorrelationNames,
+        () => metaData.supportsDifferentTableCorrelationNames,
+        () => metaData.supportsExpressionsInOrderBy(),
+        () => metaData.supportsOrderByUnrelated,
+        () => metaData.supportsGroupByUnrelated,
+        () => metaData.supportsGroupByBeyondSelect,
+        () => metaData.supportsLikeEscapeClause,
+        () => metaData.supportsMultipleTransactions,
+        () => metaData.supportsMinimumSQLGrammar,
+        () => metaData.supportsCoreSQLGrammar,
+        () => metaData.supportsExtendedSQLGrammar,
+        () => metaData.supportsANSI92EntryLevelSQL,
+        () => metaData.supportsANSI92IntermediateSQL,
+        () => metaData.supportsANSI92FullSQL,
+        () => metaData.supportsIntegrityEnhancementFacility,
+        () => metaData.isCatalogAtStart,
+        () => metaData.supportsSubqueriesInComparisons,
+        () => metaData.supportsSubqueriesInExists,
+        () => metaData.supportsSubqueriesInIns,
+        () => metaData.supportsSubqueriesInQuantifieds,
+        // Spark supports this, see https://issues.apache.org/jira/browse/SPARK-18455
+        () => metaData.supportsCorrelatedSubqueries,
+        () => metaData.supportsOpenCursorsAcrossCommit,
+        () => metaData.supportsOpenCursorsAcrossRollback,
+        () => metaData.supportsOpenStatementsAcrossCommit,
+        () => metaData.supportsOpenStatementsAcrossRollback,
+        () => metaData.getMaxBinaryLiteralLength,
+        () => metaData.getMaxCharLiteralLength,
+        () => metaData.getMaxColumnsInGroupBy,
+        () => metaData.getMaxColumnsInIndex,
+        () => metaData.getMaxColumnsInOrderBy,
+        () => metaData.getMaxColumnsInSelect,
+        () => metaData.getMaxColumnsInTable,
+        () => metaData.getMaxConnections,
+        () => metaData.getMaxCursorNameLength,
+        () => metaData.getMaxIndexLength,
+        () => metaData.getMaxSchemaNameLength,
+        () => metaData.getMaxProcedureNameLength,
+        () => metaData.getMaxCatalogNameLength,
+        () => metaData.getMaxRowSize,
+        () => metaData.doesMaxRowSizeIncludeBlobs,
+        () => metaData.getMaxStatementLength,
+        () => metaData.getMaxStatements,
+        () => metaData.getMaxTableNameLength,
+        () => metaData.getMaxTablesInSelect,
+        () => metaData.getMaxUserNameLength,
+        () => metaData.supportsTransactionIsolationLevel(1),
+        () => metaData.supportsDataDefinitionAndDataManipulationTransactions,
+        () => metaData.supportsDataManipulationTransactionsOnly,
+        () => metaData.dataDefinitionCausesTransactionCommit,
+        () => metaData.dataDefinitionIgnoredInTransactions,
+        () => metaData.getColumnPrivileges("", "%", "%", "%"),
+        () => metaData.getTablePrivileges("", "%", "%"),
+        () => metaData.getBestRowIdentifier("", "%", "%", 0, true),
+        () => metaData.getVersionColumns("", "%", "%"),
+        () => metaData.getExportedKeys("", "default", ""),
+        () => metaData.supportsResultSetConcurrency(ResultSet.TYPE_FORWARD_ONLY, 2),
+        () => metaData.ownUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.ownDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.ownInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.othersUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.othersDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.othersInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.updatesAreDetected(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.deletesAreDetected(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.insertsAreDetected(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.supportsNamedParameters(),
+        () => metaData.supportsMultipleOpenResults,
+        () => metaData.supportsGetGeneratedKeys,
+        () => metaData.getSuperTypes("", "%", "%"),
+        () => metaData.getSuperTables("", "%", "%"),
+        () => metaData.getAttributes("", "%", "%", "%"),
+        () => metaData.getResultSetHoldability,
+        () => metaData.locatorsUpdateCopy,
+        () => metaData.supportsStatementPooling,
+        () => metaData.getRowIdLifetime,
+        () => metaData.supportsStoredFunctionsUsingCallSyntax,
+        () => metaData.autoCommitFailureClosesAllResultSets,
+        () => metaData.getClientInfoProperties,
+        () => metaData.getFunctionColumns("", "%", "%", "%"),
+        () => metaData.getPseudoColumns("", "%", "%", "%"),
+        () => metaData.generatedKeyAlwaysReturned).foreach { func =>
+        val e = intercept[SQLFeatureNotSupportedException](func())
+        assert(e.getMessage === "Method not supported")
+      }
+
+      assert(metaData.allTablesAreSelectable)
+      assert(metaData.getDatabaseProductName === "Apache Kyuubi (Incubating)")
+      assert(metaData.getDatabaseProductVersion === KYUUBI_VERSION)
+      assert(metaData.getDriverName === "Kyuubi Project Hive JDBC Shaded Client")
+      assert(metaData.getDriverVersion === KYUUBI_VERSION)
+      assert(metaData.getDatabaseMajorVersion === Utils.majorVersion(KYUUBI_VERSION))
+      assert(metaData.getDatabaseMinorVersion === Utils.minorVersion(KYUUBI_VERSION))
+      assert(
+        metaData.getIdentifierQuoteString === " ",
+        "This method returns a space \" \" if identifier quoting is not supported")
+      assert(metaData.getNumericFunctions === "")
+      assert(metaData.getStringFunctions === "")
+      assert(metaData.getSystemFunctions === "")
+      assert(metaData.getTimeDateFunctions === "")
+      assert(metaData.getSearchStringEscape === "\\")
+      assert(metaData.getExtraNameCharacters === "")
+      assert(metaData.supportsAlterTableWithAddColumn())
+      assert(!metaData.supportsAlterTableWithDropColumn())
+      assert(metaData.supportsColumnAliasing())
+      assert(metaData.supportsGroupBy)
+      assert(!metaData.supportsMultipleResultSets)
+      assert(!metaData.supportsNonNullableColumns)
+      assert(metaData.supportsOuterJoins)
+      assert(metaData.supportsFullOuterJoins)
+      assert(metaData.supportsLimitedOuterJoins)
+      assert(metaData.getSchemaTerm === "database")
+      assert(metaData.getProcedureTerm === "UDF")
+      assert(metaData.getCatalogTerm === "instance")
+      assert(metaData.getCatalogSeparator === ".")
+      assert(metaData.supportsSchemasInDataManipulation)
+      assert(!metaData.supportsSchemasInProcedureCalls)
+      assert(metaData.supportsSchemasInTableDefinitions)
+      assert(!metaData.supportsSchemasInIndexDefinitions)
+      assert(!metaData.supportsSchemasInPrivilegeDefinitions)
+      // This is actually supported, but the hive jdbc package returns false
+      assert(!metaData.supportsCatalogsInDataManipulation)
+      assert(!metaData.supportsCatalogsInProcedureCalls)
+      // This is actually supported, but the hive jdbc package returns false
+      assert(!metaData.supportsCatalogsInTableDefinitions)
+      assert(!metaData.supportsCatalogsInIndexDefinitions)
+      assert(!metaData.supportsCatalogsInPrivilegeDefinitions)
+      assert(!metaData.supportsPositionedDelete)
+      assert(!metaData.supportsPositionedUpdate)
+      assert(!metaData.supportsSelectForUpdate)
+      assert(!metaData.supportsStoredProcedures)
+      // This is actually supported, but the hive jdbc package returns false
+      assert(!metaData.supportsUnion)
+      assert(metaData.supportsUnionAll)
+      assert(metaData.getMaxColumnNameLength === 128)
+      assert(metaData.getDefaultTransactionIsolation === java.sql.Connection.TRANSACTION_NONE)
+      assert(!metaData.supportsTransactions)
+      assert(!metaData.getProcedureColumns("", "%", "%", "%").next())
+      intercept[SQLException](metaData.getPrimaryKeys("", "default", ""))
+      assert(!metaData.getImportedKeys("", "default", "").next())
+      intercept[SQLException] {
+        metaData.getCrossReference("", "default", "src", "", "default", "src2")
+      }
+      assert(!metaData.getIndexInfo("", "default", "src", true, true).next())
+
+      assert(metaData.supportsResultSetType(new Random().nextInt()))
+      assert(!metaData.supportsBatchUpdates)
+      assert(!metaData.getUDTs(",", "%", "%", null).next())
+      assert(!metaData.supportsSavepoints)
+      assert(!metaData.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT))
+      assert(metaData.getJDBCMajorVersion === 3)
+      assert(metaData.getJDBCMinorVersion === 0)
+      assert(metaData.getSQLStateType === DatabaseMetaData.sqlStateSQL)
+      assert(metaData.getMaxLogicalLobSize === 0)
+      assert(!metaData.supportsRefCursors)
+    }
+  }
 }