Posted to commits@kyuubi.apache.org by ul...@apache.org on 2023/02/16 09:54:04 UTC

[kyuubi] branch master updated: [KYUUBI #4328] Make Trino jdbc driver work

This is an automated email from the ASF dual-hosted git repository.

ulyssesyou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kyuubi.git


The following commit(s) were added to refs/heads/master by this push:
     new 6688b3dac [KYUUBI #4328] Make Trino jdbc driver work
6688b3dac is described below

commit 6688b3dacf80ac519f6a0d56f8168d15166f5916
Author: ulysses-you <ul...@gmail.com>
AuthorDate: Thu Feb 16 17:53:55 2023 +0800

    [KYUUBI #4328] Make Trino jdbc driver work
    
    ### _Why are the changes needed?_
    
    According to `io.trino.jdbc.ColumnInfo`, some types require signature parameters:
    - varchar(n)
    - char(n)
    - decimal(precision, scale)
    
    Currently, queries returning these types fail with the Trino JDBC driver; see the sketch below.
    Screenshot of the failure: https://user-images.githubusercontent.com/12025282/218707052-a2e9dc91-0333-483c-bc0a-96baec213578.png
    
    ### _How was this patch tested?_
    - [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
    
    - [ ] Add screenshots for manual tests if appropriate
    
    - [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
    
    Closes #4328 from ulysses-you/fix-signature.
    
    Closes #4328
    
    aede5cec [ulysses-you] nit
    ae1a7968 [ulysses-you] fix test
    8ecdb346 [ulysses-you] Make Trino jdbc driver work
    
    Authored-by: ulysses-you <ul...@gmail.com>
    Signed-off-by: ulyssesyou <ul...@apache.org>
---
 .github/workflows/master.yml                       |   6 +-
 .../it/trino/server/TrinoFrontendSuite.scala       |  61 +++++++
 .../kyuubi/operation/HiveMetadataTests.scala       | 188 ++++++++++++++++++++-
 .../kyuubi/operation/SparkMetadataTests.scala      | 187 --------------------
 .../kyuubi/client/api/v1/dto/VersionInfo.java      |   7 +-
 .../kyuubi/server/trino/api/TrinoContext.scala     | 128 +++++++++++---
 .../server/trino/api/TrinoScalaObjectMapper.scala  |  12 +-
 .../server/trino/api/v1/StatementResource.scala    |   3 +-
 .../apache/kyuubi/server/trino/api/v1/dto/Ok.java  |  11 +-
 9 files changed, 383 insertions(+), 220 deletions(-)

diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index ed7403fbc..26d231297 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -294,9 +294,9 @@ jobs:
           check-latest: false
       - name: Build and test Trino with maven w/o linters
         run: |
-          TEST_MODULES="externals/kyuubi-trino-engine,integration-tests/kyuubi-trino-it"
-          ./build/mvn ${MVN_OPT} -pl ${TEST_MODULES} -am clean install -DskipTests
-          ./build/mvn ${MVN_OPT} -pl ${TEST_MODULES} test
+          TEST_MODULES="kyuubi-server,externals/kyuubi-trino-engine,externals/kyuubi-spark-sql-engine,externals/kyuubi-download,integration-tests/kyuubi-trino-it"
+          ./build/mvn ${MVN_OPT} -pl ${TEST_MODULES} -am -Pflink-provided -Phive-provided clean install -DskipTests
+          ./build/mvn -Dmaven.javadoc.skip=true -Drat.skip=true -Dscalastyle.skip=true -Dspotless.check.skip -pl ${TEST_MODULES} -am -Pflink-provided -Phive-provided test -Dtest=none -DwildcardSuites=org.apache.kyuubi.it.trino.operation.TrinoOperationSuite,org.apache.kyuubi.it.trino.server.TrinoFrontendSuite
       - name: Upload test logs
         if: failure()
         uses: actions/upload-artifact@v3
diff --git a/integration-tests/kyuubi-trino-it/src/test/scala/org/apache/kyuubi/it/trino/server/TrinoFrontendSuite.scala b/integration-tests/kyuubi-trino-it/src/test/scala/org/apache/kyuubi/it/trino/server/TrinoFrontendSuite.scala
new file mode 100644
index 000000000..bd8bf3eda
--- /dev/null
+++ b/integration-tests/kyuubi-trino-it/src/test/scala/org/apache/kyuubi/it/trino/server/TrinoFrontendSuite.scala
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.it.trino.server
+
+import scala.util.control.NonFatal
+
+import org.apache.kyuubi.WithKyuubiServer
+import org.apache.kyuubi.config.KyuubiConf
+import org.apache.kyuubi.operation.SparkMetadataTests
+
+/**
+ * This test exercises the Trino JDBC driver against Kyuubi Server with a Spark engine:
+ *
+ *  -------------------------------------------------------------
+ *  |                JDBC                                       |
+ *  |  Trino-driver  ---->  Kyuubi Server  -->  Spark Engine    |
+ *  |                                                           |
+ *  -------------------------------------------------------------
+ */
+class TrinoFrontendSuite extends WithKyuubiServer with SparkMetadataTests {
+  // TODO: Add more test cases
+
+  override protected val conf: KyuubiConf = {
+    KyuubiConf().set(KyuubiConf.FRONTEND_PROTOCOLS, Seq("TRINO"))
+  }
+  override protected def jdbcUrl: String = {
+    s"jdbc:trino://${server.frontendServices.head.connectionUrl}/;"
+  }
+
+  // the Trino JDBC driver requires SSL to be enabled if a password is specified
+  override protected val password: String = ""
+
+  override def beforeAll(): Unit = {
+    super.beforeAll()
+
+    // Eagerly start the Spark engine before running tests. This is a workaround for the
+    // Trino JDBC driver, which does not support changing its HTTP connect timeout.
+    try {
+      withJdbcStatement() { statement =>
+        statement.execute("SELECT 1")
+      }
+    } catch {
+      case NonFatal(_) => // ignore failures; this statement only warms up the engine
+    }
+  }
+}
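
For context, a minimal sketch of what this suite exercises, using the stock Trino
JDBC driver against Kyuubi's TRINO frontend (the URL below is a placeholder; the
suite derives the real one from server.frontendServices.head.connectionUrl):

    import java.sql.DriverManager

    // Hypothetical endpoint; substitute the frontend's actual host and port.
    val conn = DriverManager.getConnection("jdbc:trino://localhost:10999/", "user", "")
    val rs = conn.createStatement().executeQuery("SELECT 1")
    while (rs.next()) println(rs.getInt(1))
    conn.close()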
diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/HiveMetadataTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/HiveMetadataTests.scala
index fe1f5f47b..aad31d5b8 100644
--- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/HiveMetadataTests.scala
+++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/HiveMetadataTests.scala
@@ -17,7 +17,11 @@
 
 package org.apache.kyuubi.operation
 
-import org.apache.kyuubi.Utils
+import java.sql.{DatabaseMetaData, ResultSet, SQLException, SQLFeatureNotSupportedException}
+
+import scala.util.Random
+
+import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiSQLException, Utils}
 import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._
 
 // For `hive` external catalog only
@@ -98,4 +102,186 @@ trait HiveMetadataTests extends SparkMetadataTests {
       statement.execute(s"DROP VIEW IF EXISTS ${schemas(3)}.$view_global_test")
     }
   }
+
+  test("audit Kyuubi Hive JDBC connection common MetaData") {
+    withJdbcStatement() { statement =>
+      val metaData = statement.getConnection.getMetaData
+      Seq(
+        () => metaData.allProceduresAreCallable(),
+        () => metaData.getURL,
+        () => metaData.getUserName,
+        () => metaData.isReadOnly,
+        () => metaData.nullsAreSortedHigh,
+        () => metaData.nullsAreSortedLow,
+        () => metaData.nullsAreSortedAtStart(),
+        () => metaData.nullsAreSortedAtEnd(),
+        () => metaData.usesLocalFiles(),
+        () => metaData.usesLocalFilePerTable(),
+        () => metaData.supportsMixedCaseIdentifiers(),
+        () => metaData.supportsMixedCaseQuotedIdentifiers(),
+        () => metaData.storesUpperCaseIdentifiers(),
+        () => metaData.storesUpperCaseQuotedIdentifiers(),
+        () => metaData.storesLowerCaseIdentifiers(),
+        () => metaData.storesLowerCaseQuotedIdentifiers(),
+        () => metaData.storesMixedCaseIdentifiers(),
+        () => metaData.storesMixedCaseQuotedIdentifiers(),
+        () => metaData.nullPlusNonNullIsNull,
+        () => metaData.supportsConvert,
+        () => metaData.supportsTableCorrelationNames,
+        () => metaData.supportsDifferentTableCorrelationNames,
+        () => metaData.supportsExpressionsInOrderBy(),
+        () => metaData.supportsOrderByUnrelated,
+        () => metaData.supportsGroupByUnrelated,
+        () => metaData.supportsGroupByBeyondSelect,
+        () => metaData.supportsLikeEscapeClause,
+        () => metaData.supportsMultipleTransactions,
+        () => metaData.supportsMinimumSQLGrammar,
+        () => metaData.supportsCoreSQLGrammar,
+        () => metaData.supportsExtendedSQLGrammar,
+        () => metaData.supportsANSI92EntryLevelSQL,
+        () => metaData.supportsANSI92IntermediateSQL,
+        () => metaData.supportsANSI92FullSQL,
+        () => metaData.supportsIntegrityEnhancementFacility,
+        () => metaData.isCatalogAtStart,
+        () => metaData.supportsSubqueriesInComparisons,
+        () => metaData.supportsSubqueriesInExists,
+        () => metaData.supportsSubqueriesInIns,
+        () => metaData.supportsSubqueriesInQuantifieds,
+        // Spark supports this, see https://issues.apache.org/jira/browse/SPARK-18455
+        () => metaData.supportsCorrelatedSubqueries,
+        () => metaData.supportsOpenCursorsAcrossCommit,
+        () => metaData.supportsOpenCursorsAcrossRollback,
+        () => metaData.supportsOpenStatementsAcrossCommit,
+        () => metaData.supportsOpenStatementsAcrossRollback,
+        () => metaData.getMaxBinaryLiteralLength,
+        () => metaData.getMaxCharLiteralLength,
+        () => metaData.getMaxColumnsInGroupBy,
+        () => metaData.getMaxColumnsInIndex,
+        () => metaData.getMaxColumnsInOrderBy,
+        () => metaData.getMaxColumnsInSelect,
+        () => metaData.getMaxColumnsInTable,
+        () => metaData.getMaxConnections,
+        () => metaData.getMaxCursorNameLength,
+        () => metaData.getMaxIndexLength,
+        () => metaData.getMaxSchemaNameLength,
+        () => metaData.getMaxProcedureNameLength,
+        () => metaData.getMaxCatalogNameLength,
+        () => metaData.getMaxRowSize,
+        () => metaData.doesMaxRowSizeIncludeBlobs,
+        () => metaData.getMaxStatementLength,
+        () => metaData.getMaxStatements,
+        () => metaData.getMaxTableNameLength,
+        () => metaData.getMaxTablesInSelect,
+        () => metaData.getMaxUserNameLength,
+        () => metaData.supportsTransactionIsolationLevel(1),
+        () => metaData.supportsDataDefinitionAndDataManipulationTransactions,
+        () => metaData.supportsDataManipulationTransactionsOnly,
+        () => metaData.dataDefinitionCausesTransactionCommit,
+        () => metaData.dataDefinitionIgnoredInTransactions,
+        () => metaData.getColumnPrivileges("", "%", "%", "%"),
+        () => metaData.getTablePrivileges("", "%", "%"),
+        () => metaData.getBestRowIdentifier("", "%", "%", 0, true),
+        () => metaData.getVersionColumns("", "%", "%"),
+        () => metaData.getExportedKeys("", "default", ""),
+        () => metaData.supportsResultSetConcurrency(ResultSet.TYPE_FORWARD_ONLY, 2),
+        () => metaData.ownUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.ownDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.ownInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.othersUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.othersDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.othersInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.updatesAreDetected(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.deletesAreDetected(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.insertsAreDetected(ResultSet.TYPE_FORWARD_ONLY),
+        () => metaData.supportsNamedParameters(),
+        () => metaData.supportsMultipleOpenResults,
+        () => metaData.supportsGetGeneratedKeys,
+        () => metaData.getSuperTypes("", "%", "%"),
+        () => metaData.getSuperTables("", "%", "%"),
+        () => metaData.getAttributes("", "%", "%", "%"),
+        () => metaData.getResultSetHoldability,
+        () => metaData.locatorsUpdateCopy,
+        () => metaData.supportsStatementPooling,
+        () => metaData.getRowIdLifetime,
+        () => metaData.supportsStoredFunctionsUsingCallSyntax,
+        () => metaData.autoCommitFailureClosesAllResultSets,
+        () => metaData.getFunctionColumns("", "%", "%", "%"),
+        () => metaData.getPseudoColumns("", "%", "%", "%"),
+        () => metaData.generatedKeyAlwaysReturned).foreach { func =>
+        val e = intercept[SQLFeatureNotSupportedException](func())
+        assert(e.getMessage === "Method not supported")
+      }
+
+      assert(metaData.allTablesAreSelectable)
+      assert(metaData.getClientInfoProperties.next)
+      assert(metaData.getDriverName === "Kyuubi Project Hive JDBC Client" ||
+        metaData.getDriverName === "Kyuubi Project Hive JDBC Shaded Client")
+      assert(metaData.getDriverVersion === KYUUBI_VERSION)
+      assert(
+        metaData.getIdentifierQuoteString === " ",
+        "This method returns a space \" \" if identifier quoting is not supported")
+      assert(metaData.getNumericFunctions === "")
+      assert(metaData.getStringFunctions === "")
+      assert(metaData.getSystemFunctions === "")
+      assert(metaData.getTimeDateFunctions === "")
+      assert(metaData.getSearchStringEscape === "\\")
+      assert(metaData.getExtraNameCharacters === "")
+      assert(metaData.supportsAlterTableWithAddColumn())
+      assert(!metaData.supportsAlterTableWithDropColumn())
+      assert(metaData.supportsColumnAliasing())
+      assert(metaData.supportsGroupBy)
+      assert(!metaData.supportsMultipleResultSets)
+      assert(!metaData.supportsNonNullableColumns)
+      assert(metaData.supportsOuterJoins)
+      assert(metaData.supportsFullOuterJoins)
+      assert(metaData.supportsLimitedOuterJoins)
+      assert(metaData.getSchemaTerm === "database")
+      assert(metaData.getProcedureTerm === "UDF")
+      assert(metaData.getCatalogTerm === "catalog")
+      assert(metaData.getCatalogSeparator === ".")
+      assert(metaData.supportsSchemasInDataManipulation)
+      assert(!metaData.supportsSchemasInProcedureCalls)
+      assert(metaData.supportsSchemasInTableDefinitions)
+      assert(!metaData.supportsSchemasInIndexDefinitions)
+      assert(!metaData.supportsSchemasInPrivilegeDefinitions)
+      assert(metaData.supportsCatalogsInDataManipulation)
+      assert(metaData.supportsCatalogsInProcedureCalls)
+      assert(metaData.supportsCatalogsInTableDefinitions)
+      assert(metaData.supportsCatalogsInIndexDefinitions)
+      assert(metaData.supportsCatalogsInPrivilegeDefinitions)
+      assert(!metaData.supportsPositionedDelete)
+      assert(!metaData.supportsPositionedUpdate)
+      assert(!metaData.supportsSelectForUpdate)
+      assert(!metaData.supportsStoredProcedures)
+      // This is actually supported, but the Hive JDBC package returns false
+      assert(!metaData.supportsUnion)
+      assert(metaData.supportsUnionAll)
+      assert(metaData.getMaxColumnNameLength === 128)
+      assert(metaData.getDefaultTransactionIsolation === java.sql.Connection.TRANSACTION_NONE)
+      assert(!metaData.supportsTransactions)
+      assert(!metaData.getProcedureColumns("", "%", "%", "%").next())
+      val e1 = intercept[SQLException] {
+        metaData.getPrimaryKeys("", "default", "src").next()
+      }
+      assert(e1.getMessage.contains(KyuubiSQLException.featureNotSupported().getMessage))
+      assert(!metaData.getImportedKeys("", "default", "").next())
+
+      val e2 = intercept[SQLException] {
+        metaData.getCrossReference("", "default", "src", "", "default", "src2").next()
+      }
+      assert(e2.getMessage.contains(KyuubiSQLException.featureNotSupported().getMessage))
+      assert(!metaData.getIndexInfo("", "default", "src", true, true).next())
+
+      assert(metaData.supportsResultSetType(new Random().nextInt()))
+      assert(!metaData.supportsBatchUpdates)
+      assert(!metaData.getUDTs(",", "%", "%", null).next())
+      assert(!metaData.supportsSavepoints)
+      assert(!metaData.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT))
+      assert(metaData.getJDBCMajorVersion === 3)
+      assert(metaData.getJDBCMinorVersion === 0)
+      assert(metaData.getSQLStateType === DatabaseMetaData.sqlStateSQL)
+      assert(metaData.getMaxLogicalLobSize === 0)
+      assert(!metaData.supportsRefCursors)
+    }
+  }
 }
diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkMetadataTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkMetadataTests.scala
index 4faf5bba4..97099ce47 100644
--- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkMetadataTests.scala
+++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkMetadataTests.scala
@@ -17,11 +17,6 @@
 
 package org.apache.kyuubi.operation
 
-import java.sql.{DatabaseMetaData, ResultSet, SQLException, SQLFeatureNotSupportedException}
-
-import scala.util.Random
-
-import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiSQLException}
 import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._
 
 // For both `in-memory` and `hive` external catalog
@@ -292,186 +287,4 @@ trait SparkMetadataTests extends HiveJDBCTestHelper {
       assert(typeInfo.getInt(NUM_PREC_RADIX) === 0)
     }
   }
-
-  test("audit Kyuubi Hive JDBC connection common MetaData") {
-    withJdbcStatement() { statement =>
-      val metaData = statement.getConnection.getMetaData
-      Seq(
-        () => metaData.allProceduresAreCallable(),
-        () => metaData.getURL,
-        () => metaData.getUserName,
-        () => metaData.isReadOnly,
-        () => metaData.nullsAreSortedHigh,
-        () => metaData.nullsAreSortedLow,
-        () => metaData.nullsAreSortedAtStart(),
-        () => metaData.nullsAreSortedAtEnd(),
-        () => metaData.usesLocalFiles(),
-        () => metaData.usesLocalFilePerTable(),
-        () => metaData.supportsMixedCaseIdentifiers(),
-        () => metaData.supportsMixedCaseQuotedIdentifiers(),
-        () => metaData.storesUpperCaseIdentifiers(),
-        () => metaData.storesUpperCaseQuotedIdentifiers(),
-        () => metaData.storesLowerCaseIdentifiers(),
-        () => metaData.storesLowerCaseQuotedIdentifiers(),
-        () => metaData.storesMixedCaseIdentifiers(),
-        () => metaData.storesMixedCaseQuotedIdentifiers(),
-        () => metaData.nullPlusNonNullIsNull,
-        () => metaData.supportsConvert,
-        () => metaData.supportsTableCorrelationNames,
-        () => metaData.supportsDifferentTableCorrelationNames,
-        () => metaData.supportsExpressionsInOrderBy(),
-        () => metaData.supportsOrderByUnrelated,
-        () => metaData.supportsGroupByUnrelated,
-        () => metaData.supportsGroupByBeyondSelect,
-        () => metaData.supportsLikeEscapeClause,
-        () => metaData.supportsMultipleTransactions,
-        () => metaData.supportsMinimumSQLGrammar,
-        () => metaData.supportsCoreSQLGrammar,
-        () => metaData.supportsExtendedSQLGrammar,
-        () => metaData.supportsANSI92EntryLevelSQL,
-        () => metaData.supportsANSI92IntermediateSQL,
-        () => metaData.supportsANSI92FullSQL,
-        () => metaData.supportsIntegrityEnhancementFacility,
-        () => metaData.isCatalogAtStart,
-        () => metaData.supportsSubqueriesInComparisons,
-        () => metaData.supportsSubqueriesInExists,
-        () => metaData.supportsSubqueriesInIns,
-        () => metaData.supportsSubqueriesInQuantifieds,
-        // Spark support this, see https://issues.apache.org/jira/browse/SPARK-18455
-        () => metaData.supportsCorrelatedSubqueries,
-        () => metaData.supportsOpenCursorsAcrossCommit,
-        () => metaData.supportsOpenCursorsAcrossRollback,
-        () => metaData.supportsOpenStatementsAcrossCommit,
-        () => metaData.supportsOpenStatementsAcrossRollback,
-        () => metaData.getMaxBinaryLiteralLength,
-        () => metaData.getMaxCharLiteralLength,
-        () => metaData.getMaxColumnsInGroupBy,
-        () => metaData.getMaxColumnsInIndex,
-        () => metaData.getMaxColumnsInOrderBy,
-        () => metaData.getMaxColumnsInSelect,
-        () => metaData.getMaxColumnsInTable,
-        () => metaData.getMaxConnections,
-        () => metaData.getMaxCursorNameLength,
-        () => metaData.getMaxIndexLength,
-        () => metaData.getMaxSchemaNameLength,
-        () => metaData.getMaxProcedureNameLength,
-        () => metaData.getMaxCatalogNameLength,
-        () => metaData.getMaxRowSize,
-        () => metaData.doesMaxRowSizeIncludeBlobs,
-        () => metaData.getMaxStatementLength,
-        () => metaData.getMaxStatements,
-        () => metaData.getMaxTableNameLength,
-        () => metaData.getMaxTablesInSelect,
-        () => metaData.getMaxUserNameLength,
-        () => metaData.supportsTransactionIsolationLevel(1),
-        () => metaData.supportsDataDefinitionAndDataManipulationTransactions,
-        () => metaData.supportsDataManipulationTransactionsOnly,
-        () => metaData.dataDefinitionCausesTransactionCommit,
-        () => metaData.dataDefinitionIgnoredInTransactions,
-        () => metaData.getColumnPrivileges("", "%", "%", "%"),
-        () => metaData.getTablePrivileges("", "%", "%"),
-        () => metaData.getBestRowIdentifier("", "%", "%", 0, true),
-        () => metaData.getVersionColumns("", "%", "%"),
-        () => metaData.getExportedKeys("", "default", ""),
-        () => metaData.supportsResultSetConcurrency(ResultSet.TYPE_FORWARD_ONLY, 2),
-        () => metaData.ownUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.ownDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.ownInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.othersUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.othersDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.othersInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.updatesAreDetected(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.deletesAreDetected(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.insertsAreDetected(ResultSet.TYPE_FORWARD_ONLY),
-        () => metaData.supportsNamedParameters(),
-        () => metaData.supportsMultipleOpenResults,
-        () => metaData.supportsGetGeneratedKeys,
-        () => metaData.getSuperTypes("", "%", "%"),
-        () => metaData.getSuperTables("", "%", "%"),
-        () => metaData.getAttributes("", "%", "%", "%"),
-        () => metaData.getResultSetHoldability,
-        () => metaData.locatorsUpdateCopy,
-        () => metaData.supportsStatementPooling,
-        () => metaData.getRowIdLifetime,
-        () => metaData.supportsStoredFunctionsUsingCallSyntax,
-        () => metaData.autoCommitFailureClosesAllResultSets,
-        () => metaData.getFunctionColumns("", "%", "%", "%"),
-        () => metaData.getPseudoColumns("", "%", "%", "%"),
-        () => metaData.generatedKeyAlwaysReturned).foreach { func =>
-        val e = intercept[SQLFeatureNotSupportedException](func())
-        assert(e.getMessage === "Method not supported")
-      }
-
-      assert(metaData.allTablesAreSelectable)
-      assert(metaData.getClientInfoProperties.next)
-      assert(metaData.getDriverName === "Kyuubi Project Hive JDBC Client" ||
-        metaData.getDriverName === "Kyuubi Project Hive JDBC Shaded Client")
-      assert(metaData.getDriverVersion === KYUUBI_VERSION)
-      assert(
-        metaData.getIdentifierQuoteString === " ",
-        "This method returns a space \" \" if identifier quoting is not supported")
-      assert(metaData.getNumericFunctions === "")
-      assert(metaData.getStringFunctions === "")
-      assert(metaData.getSystemFunctions === "")
-      assert(metaData.getTimeDateFunctions === "")
-      assert(metaData.getSearchStringEscape === "\\")
-      assert(metaData.getExtraNameCharacters === "")
-      assert(metaData.supportsAlterTableWithAddColumn())
-      assert(!metaData.supportsAlterTableWithDropColumn())
-      assert(metaData.supportsColumnAliasing())
-      assert(metaData.supportsGroupBy)
-      assert(!metaData.supportsMultipleResultSets)
-      assert(!metaData.supportsNonNullableColumns)
-      assert(metaData.supportsOuterJoins)
-      assert(metaData.supportsFullOuterJoins)
-      assert(metaData.supportsLimitedOuterJoins)
-      assert(metaData.getSchemaTerm === "database")
-      assert(metaData.getProcedureTerm === "UDF")
-      assert(metaData.getCatalogTerm === "catalog")
-      assert(metaData.getCatalogSeparator === ".")
-      assert(metaData.supportsSchemasInDataManipulation)
-      assert(!metaData.supportsSchemasInProcedureCalls)
-      assert(metaData.supportsSchemasInTableDefinitions)
-      assert(!metaData.supportsSchemasInIndexDefinitions)
-      assert(!metaData.supportsSchemasInPrivilegeDefinitions)
-      assert(metaData.supportsCatalogsInDataManipulation)
-      assert(metaData.supportsCatalogsInProcedureCalls)
-      assert(metaData.supportsCatalogsInTableDefinitions)
-      assert(metaData.supportsCatalogsInIndexDefinitions)
-      assert(metaData.supportsCatalogsInPrivilegeDefinitions)
-      assert(!metaData.supportsPositionedDelete)
-      assert(!metaData.supportsPositionedUpdate)
-      assert(!metaData.supportsSelectForUpdate)
-      assert(!metaData.supportsStoredProcedures)
-      // This is actually supported, but hive jdbc package return false
-      assert(!metaData.supportsUnion)
-      assert(metaData.supportsUnionAll)
-      assert(metaData.getMaxColumnNameLength === 128)
-      assert(metaData.getDefaultTransactionIsolation === java.sql.Connection.TRANSACTION_NONE)
-      assert(!metaData.supportsTransactions)
-      assert(!metaData.getProcedureColumns("", "%", "%", "%").next())
-      val e1 = intercept[SQLException] {
-        metaData.getPrimaryKeys("", "default", "src").next()
-      }
-      assert(e1.getMessage.contains(KyuubiSQLException.featureNotSupported().getMessage))
-      assert(!metaData.getImportedKeys("", "default", "").next())
-
-      val e2 = intercept[SQLException] {
-        metaData.getCrossReference("", "default", "src", "", "default", "src2").next()
-      }
-      assert(e2.getMessage.contains(KyuubiSQLException.featureNotSupported().getMessage))
-      assert(!metaData.getIndexInfo("", "default", "src", true, true).next())
-
-      assert(metaData.supportsResultSetType(new Random().nextInt()))
-      assert(!metaData.supportsBatchUpdates)
-      assert(!metaData.getUDTs(",", "%", "%", null).next())
-      assert(!metaData.supportsSavepoints)
-      assert(!metaData.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT))
-      assert(metaData.getJDBCMajorVersion === 3)
-      assert(metaData.getJDBCMinorVersion === 0)
-      assert(metaData.getSQLStateType === DatabaseMetaData.sqlStateSQL)
-      assert(metaData.getMaxLogicalLobSize === 0)
-      assert(!metaData.supportsRefCursors)
-    }
-  }
 }
diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/VersionInfo.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/VersionInfo.java
index 427272f41..5749c4e32 100644
--- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/VersionInfo.java
+++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/VersionInfo.java
@@ -17,6 +17,8 @@
 
 package org.apache.kyuubi.client.api.v1.dto;
 
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
 import java.util.Objects;
 import org.apache.commons.lang3.builder.ReflectionToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
@@ -26,10 +28,13 @@ public class VersionInfo {
 
   public VersionInfo() {}
 
-  public VersionInfo(String version) {
+  // Explicitly specify JsonProperty to stay compatible when the auto-detect feature is disabled
+  @JsonCreator
+  public VersionInfo(@JsonProperty("version") String version) {
     this.version = version;
   }
 
+  @JsonProperty
   public String getVersion() {
     return version;
   }
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala
index 0c7911a46..8c85f31d7 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala
@@ -20,13 +20,15 @@ package org.apache.kyuubi.server.trino.api
 import java.io.UnsupportedEncodingException
 import java.net.{URI, URLDecoder, URLEncoder}
 import java.util
+import java.util.Optional
 import javax.ws.rs.core.{HttpHeaders, Response}
 
 import scala.collection.JavaConverters._
 
-import io.trino.client.{ClientStandardTypes, ClientTypeSignature, Column, QueryError, QueryResults, StatementStats, Warning}
+import com.google.common.collect.ImmutableList
+import io.trino.client.{ClientStandardTypes, ClientTypeSignature, ClientTypeSignatureParameter, Column, NamedClientTypeSignature, QueryError, QueryResults, RowFieldName, StatementStats, Warning}
 import io.trino.client.ProtocolHeaders.TRINO_HEADERS
-import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TRowSet, TTypeId}
+import org.apache.hive.service.rpc.thrift.{TCLIServiceConstants, TGetResultSetMetadataResp, TRowSet, TTypeEntry, TTypeId}
 
 import org.apache.kyuubi.operation.OperationState.FINISHED
 import org.apache.kyuubi.operation.OperationStatus
@@ -216,32 +218,110 @@ object TrinoContext {
       0L)
   }
 
-  def convertTColumn(columns: TGetResultSetMetadataResp): util.List[Column] = {
+  private def convertTColumn(columns: TGetResultSetMetadataResp): util.List[Column] = {
     columns.getSchema.getColumns.asScala.map(c => {
-      val tp = c.getTypeDesc.getTypes.get(0).getPrimitiveEntry.getType match {
-        case TTypeId.BOOLEAN_TYPE => ClientStandardTypes.BOOLEAN
-        case TTypeId.TINYINT_TYPE => ClientStandardTypes.TINYINT
-        case TTypeId.SMALLINT_TYPE => ClientStandardTypes.SMALLINT
-        case TTypeId.INT_TYPE => ClientStandardTypes.INTEGER
-        case TTypeId.BIGINT_TYPE => ClientStandardTypes.BIGINT
-        case TTypeId.FLOAT_TYPE => ClientStandardTypes.DOUBLE
-        case TTypeId.DOUBLE_TYPE => ClientStandardTypes.DOUBLE
-        case TTypeId.STRING_TYPE => ClientStandardTypes.VARCHAR
-        case TTypeId.TIMESTAMP_TYPE => ClientStandardTypes.TIMESTAMP
-        case TTypeId.BINARY_TYPE => ClientStandardTypes.VARBINARY
-        case TTypeId.DECIMAL_TYPE => ClientStandardTypes.DECIMAL
-        case TTypeId.DATE_TYPE => ClientStandardTypes.DATE
-        case TTypeId.VARCHAR_TYPE => ClientStandardTypes.VARCHAR
-        case TTypeId.CHAR_TYPE => ClientStandardTypes.CHAR
-        case TTypeId.INTERVAL_YEAR_MONTH_TYPE => ClientStandardTypes.INTERVAL_YEAR_TO_MONTH
-        case TTypeId.INTERVAL_DAY_TIME_TYPE => ClientStandardTypes.TIME_WITH_TIME_ZONE
-        case TTypeId.TIMESTAMPLOCALTZ_TYPE => ClientStandardTypes.TIMESTAMP_WITH_TIME_ZONE
-        case _ => ClientStandardTypes.VARCHAR
-      }
-      new Column(c.getColumnName, tp, new ClientTypeSignature(tp))
+      val (tp, arguments) = toClientTypeSignature(c.getTypeDesc.getTypes.get(0))
+      new Column(c.getColumnName, tp, new ClientTypeSignature(tp, arguments))
     }).toList.asJava
   }
 
+  private def toClientTypeSignature(
+      entry: TTypeEntry): (String, util.List[ClientTypeSignatureParameter]) = {
+    // according to `io.trino.jdbc.ColumnInfo`
+    if (entry.isSetPrimitiveEntry) {
+      entry.getPrimitiveEntry.getType match {
+        case TTypeId.BOOLEAN_TYPE =>
+          (ClientStandardTypes.BOOLEAN, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.TINYINT_TYPE =>
+          (ClientStandardTypes.TINYINT, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.SMALLINT_TYPE =>
+          (ClientStandardTypes.SMALLINT, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.INT_TYPE =>
+          (ClientStandardTypes.INTEGER, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.BIGINT_TYPE =>
+          (ClientStandardTypes.BIGINT, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.FLOAT_TYPE =>
+          (ClientStandardTypes.DOUBLE, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.DOUBLE_TYPE =>
+          (ClientStandardTypes.DOUBLE, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.DATE_TYPE =>
+          (ClientStandardTypes.DATE, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.TIMESTAMP_TYPE =>
+          (ClientStandardTypes.TIMESTAMP, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.BINARY_TYPE =>
+          (ClientStandardTypes.VARBINARY, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.DECIMAL_TYPE =>
+          val map = entry.getPrimitiveEntry.getTypeQualifiers.getQualifiers
+          val precision = Option(map.get(TCLIServiceConstants.PRECISION)).map(_.getI32Value)
+            .getOrElse(38)
+          val scale = Option(map.get(TCLIServiceConstants.SCALE)).map(_.getI32Value)
+            .getOrElse(18)
+          (
+            ClientStandardTypes.DECIMAL,
+            ImmutableList.of(
+              ClientTypeSignatureParameter.ofLong(precision),
+              ClientTypeSignatureParameter.ofLong(scale)))
+        case TTypeId.STRING_TYPE =>
+          (
+            ClientStandardTypes.VARCHAR,
+            varcharSignatureParameter)
+        case TTypeId.VARCHAR_TYPE =>
+          (
+            ClientStandardTypes.VARCHAR,
+            varcharSignatureParameter)
+        case TTypeId.CHAR_TYPE =>
+          (ClientStandardTypes.CHAR, ImmutableList.of(ClientTypeSignatureParameter.ofLong(65536)))
+        case TTypeId.INTERVAL_YEAR_MONTH_TYPE =>
+          (
+            ClientStandardTypes.INTERVAL_YEAR_TO_MONTH,
+            ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.INTERVAL_DAY_TIME_TYPE =>
+          (ClientStandardTypes.TIME_WITH_TIME_ZONE, ImmutableList.of[ClientTypeSignatureParameter])
+        case TTypeId.TIMESTAMPLOCALTZ_TYPE =>
+          (
+            ClientStandardTypes.TIMESTAMP_WITH_TIME_ZONE,
+            ImmutableList.of[ClientTypeSignatureParameter])
+        case _ =>
+          (
+            ClientStandardTypes.VARCHAR,
+            varcharSignatureParameter)
+      }
+    } else if (entry.isSetArrayEntry) {
+      // Thrift cannot fully describe nested element types here, and it is
+      // quite hard to follow the Hive approach, so always report varchar.
+      // TODO: make complex data types more accurate
+      (
+        ClientStandardTypes.ARRAY,
+        ImmutableList.of(ClientTypeSignatureParameter.ofType(
+          new ClientTypeSignature(ClientStandardTypes.VARCHAR, varcharSignatureParameter))))
+    } else if (entry.isSetMapEntry) {
+      (
+        ClientStandardTypes.MAP,
+        ImmutableList.of(
+          ClientTypeSignatureParameter.ofType(
+            new ClientTypeSignature(ClientStandardTypes.VARCHAR, varcharSignatureParameter)),
+          ClientTypeSignatureParameter.ofType(
+            new ClientTypeSignature(ClientStandardTypes.VARCHAR, varcharSignatureParameter))))
+    } else if (entry.isSetStructEntry) {
+      val parameters = entry.getStructEntry.getNameToTypePtr.asScala.map { case (k, v) =>
+        ClientTypeSignatureParameter.ofNamedType(
+          new NamedClientTypeSignature(
+            Optional.of(new RowFieldName(k)),
+            new ClientTypeSignature(ClientStandardTypes.VARCHAR, varcharSignatureParameter)))
+      }
+      (
+        ClientStandardTypes.ROW,
+        ImmutableList.copyOf(parameters.toArray))
+    } else {
+      throw new UnsupportedOperationException(s"Do not support type: $entry")
+    }
+  }
+
+  private def varcharSignatureParameter: util.List[ClientTypeSignatureParameter] = {
+    ImmutableList.of(ClientTypeSignatureParameter.ofLong(
+      ClientTypeSignature.VARCHAR_UNBOUNDED_LENGTH))
+  }
+
   def convertTRowSet(rowSet: TRowSet): util.List[util.List[Object]] = {
     val dataResult = new util.LinkedList[util.List[Object]]
 
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoScalaObjectMapper.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoScalaObjectMapper.scala
index f6055927a..33091e338 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoScalaObjectMapper.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoScalaObjectMapper.scala
@@ -19,13 +19,23 @@ package org.apache.kyuubi.server.trino.api
 
 import javax.ws.rs.ext.ContextResolver
 
-import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
+import com.fasterxml.jackson.databind.{DeserializationFeature, MapperFeature, ObjectMapper}
 import com.fasterxml.jackson.datatype.jdk8.Jdk8Module
 
 class TrinoScalaObjectMapper extends ContextResolver[ObjectMapper] {
 
+  // refer to `io.trino.client.JsonCodec`
   private lazy val mapper = new ObjectMapper()
     .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
+    .disable(MapperFeature.AUTO_DETECT_CREATORS)
+    .disable(MapperFeature.AUTO_DETECT_FIELDS)
+    .disable(MapperFeature.AUTO_DETECT_SETTERS)
+    .disable(MapperFeature.AUTO_DETECT_GETTERS)
+    .disable(MapperFeature.AUTO_DETECT_IS_GETTERS)
+    .disable(MapperFeature.USE_GETTERS_AS_SETTERS)
+    .disable(MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS)
+    .disable(MapperFeature.INFER_PROPERTY_MUTATORS)
+    .disable(MapperFeature.ALLOW_FINAL_FIELDS_AS_MUTATORS)
     .registerModule(new Jdk8Module)
 
   override def getContext(aClass: Class[_]): ObjectMapper = mapper
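
A minimal sketch of the round trip this configuration enables (the version
string is illustrative): with every auto-detect feature disabled, Jackson only
sees members annotated with @JsonCreator/@JsonProperty, which is why VersionInfo
and Ok gain those annotations in this commit.

    import com.fasterxml.jackson.databind.ObjectMapper
    import org.apache.kyuubi.client.api.v1.dto.VersionInfo

    val mapper: ObjectMapper = new TrinoScalaObjectMapper().getContext(classOf[ObjectMapper])
    val info = mapper.readValue("""{"version":"1.7.0"}""", classOf[VersionInfo])
    assert(mapper.writeValueAsString(info) == """{"version":"1.7.0"}""")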
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/StatementResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/StatementResource.scala
index ee23c61f3..e051dbb23 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/StatementResource.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/StatementResource.scala
@@ -80,9 +80,8 @@ private[v1] class StatementResource extends ApiRequestContext with Logging {
       case e: Exception =>
         val errorMsg =
           s"Error submitting sql"
-        e.printStackTrace()
         error(errorMsg, e)
-        throw badRequest(BAD_REQUEST, errorMsg)
+        throw badRequest(BAD_REQUEST, errorMsg + "\n" + e.getMessage)
     }
   }
 
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/dto/Ok.java b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/dto/Ok.java
index 50d04609f..982baa2ef 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/dto/Ok.java
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/dto/Ok.java
@@ -20,6 +20,9 @@ package org.apache.kyuubi.server.trino.api.v1.dto;
 
 
 import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
 import org.apache.commons.lang3.builder.ReflectionToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
 
@@ -28,10 +31,16 @@ public class Ok {
 
     public Ok() {}
 
-    public Ok(String content) {
+    /**
+     * Follow the Trino convention of explicitly specifying the JSON property, since the
+     * Jackson auto-detect feature is disabled. See {@link org.apache.kyuubi.server.trino.api.TrinoScalaObjectMapper}
+     */
+    @JsonCreator
+    public Ok(@JsonProperty("content") String content) {
         this.content = content;
     }
 
+    @JsonProperty
     public String getContent() {
         return content;
     }