Posted to commits@doris.apache.org by mo...@apache.org on 2023/01/16 06:09:14 UTC

[doris] branch branch-1.2-lts updated (d054ab9dc7 -> 5142f30e80)

This is an automated email from the ASF dual-hosted git repository.

morningman pushed a change to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git


    from d054ab9dc7 [feature-wip](multi-catalog) support automatic sync hive metastore events (#15401)
     new d64fc25e0e [improvement](multi-catalog) support hive 1.x  (#15886)
     new f2ec5dde8c [feature](multi-catalog) support clickhouse jdbc catalog (#15780)
     new ee329692fe [feature](multi-catalog) support oracle jdbc catalog (#15862)
     new 6bde53b102 [fix](multi-catalog) fix bug that replay init catalog may happen after catalog is dropped (#15919)
     new 870d86690b [Bugfix] (ROLLUP) fix the coredump when add rollup by link schema change (#15654)
     new 5142f30e80 [Enhancement](jdbc scanner) add profile for jdbc scanner (#15914)

The 6 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
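
For context, commits #15780 and #15862 above add ClickHouse and Oracle JDBC catalogs. Below is a minimal sketch of the Oracle variant, assembled from the documentation changes included later in this mail; the host, credentials, and driver path are placeholders, and the ClickHouse catalog follows the same pattern with a ClickHouse JDBC URL and driver.

```sql
-- Sketch only: property values are placeholders mirroring the docs diff below.
CREATE CATALOG oracle_jdbc PROPERTIES (
    "type" = "jdbc",
    "jdbc.user" = "doris",
    "jdbc.password" = "123456",
    "jdbc.jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
    "jdbc.driver_url" = "file:/path/to/ojdbc6.jar",
    "jdbc.driver_class" = "oracle.jdbc.driver.OracleDriver"
);
```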


Summary of changes:
 be/src/olap/schema_change.cpp                      |   18 +
 be/src/vec/exec/scan/new_jdbc_scanner.cpp          |   13 +-
 be/src/vec/exec/scan/new_jdbc_scanner.h            |   10 +
 be/src/vec/exec/scan/vscan_node.h                  |    1 +
 be/src/vec/exec/vjdbc_connector.cpp                |   41 +-
 be/src/vec/exec/vjdbc_connector.h                  |    6 +
 build.sh                                           |    1 +
 .../docker-compose/oracle/init/01-drop-user.sql    |   18 +
 .../docker-compose/oracle/init/02-create-user.sql  |   19 +
 .../docker-compose/oracle/init/03-create-table.sql |   70 +
 .../docker-compose/oracle/init/04-insert.sql       |   48 +
 .../docker-compose/oracle/oracle-11.env            |   19 +
 .../docker-compose/oracle/oracle-11.yaml           |   49 +
 docker/thirdparties/start-thirdparties-docker.sh   |    7 +
 docker/thirdparties/stop-thirdparties-docker.sh    |    3 +
 .../docs/ecosystem/external-table/multi-catalog.md |   92 +-
 .../Create/CREATE-CATALOG.md                       |   52 +-
 .../docs/ecosystem/external-table/multi-catalog.md |   92 +-
 .../Create/CREATE-CATALOG.md                       |   52 +-
 .../java/org/apache/doris/catalog/HMSResource.java |    1 +
 .../org/apache/doris/catalog/JdbcResource.java     |    5 +
 .../doris/catalog/external/HMSExternalTable.java   |    2 +-
 .../org/apache/doris/datasource/CatalogMgr.java    |   18 +-
 .../doris/datasource/HMSClientException.java       |    5 +-
 .../doris/datasource/HMSExternalCatalog.java       |    1 +
 .../apache/doris/datasource/InternalCatalog.java   |    1 +
 .../doris/datasource/hive/HiveMetaStoreClient.java | 2758 ++++++++++++++++++++
 .../doris/datasource/hive/HiveVersionUtil.java     |   75 +
 .../{ => hive}/PooledHiveMetaStoreClient.java      |    4 +-
 .../org/apache/doris/external/jdbc/JdbcClient.java |  136 +-
 .../org/apache/doris/task/AlterReplicaTask.java    |   11 +
 fe/pom.xml                                         |    2 +-
 regression-test/conf/regression-conf.groovy        |    1 +
 .../jdbc_catalog_p0/test_oracle_jdbc_catalog.out   |   33 +
 .../data/rollup_p0/test_rollup_agg_date.out        |    2 +-
 .../test_oracle_jdbc_catalog.groovy                |   74 +
 .../link_schema_change/test_rollup_agg_fail.groovy |   70 +
 .../link_schema_change/test_rollup_dup_fail.groovy |   53 +
 .../link_schema_change/test_rollup_uni_fail.groovy |   53 +
 .../suites/rollup_p0/test_rollup_agg_date.groovy   |    2 +-
 .../test_agg_mv_schema_change.groovy               |    2 +-
 .../test_agg_rollup_schema_change.groovy           |    2 +-
 .../test_dup_mv_schema_change.groovy               |    2 +-
 .../test_dup_rollup_schema_change.groovy           |    2 +-
 .../test_uniq_mv_schema_change.groovy              |    2 +-
 .../test_uniq_rollup_schema_change.groovy          |    2 +-
 46 files changed, 3881 insertions(+), 49 deletions(-)
 create mode 100644 docker/thirdparties/docker-compose/oracle/init/01-drop-user.sql
 create mode 100644 docker/thirdparties/docker-compose/oracle/init/02-create-user.sql
 create mode 100644 docker/thirdparties/docker-compose/oracle/init/03-create-table.sql
 create mode 100644 docker/thirdparties/docker-compose/oracle/init/04-insert.sql
 create mode 100644 docker/thirdparties/docker-compose/oracle/oracle-11.env
 create mode 100644 docker/thirdparties/docker-compose/oracle/oracle-11.yaml
 create mode 100644 fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreClient.java
 create mode 100644 fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveVersionUtil.java
 rename fe/fe-core/src/main/java/org/apache/doris/datasource/{ => hive}/PooledHiveMetaStoreClient.java (98%)
 create mode 100644 regression-test/data/jdbc_catalog_p0/test_oracle_jdbc_catalog.out
 create mode 100644 regression-test/suites/jdbc_catalog_p0/test_oracle_jdbc_catalog.groovy
 create mode 100644 regression-test/suites/rollup_p0/link_schema_change/test_rollup_agg_fail.groovy
 create mode 100644 regression-test/suites/rollup_p0/link_schema_change/test_rollup_dup_fail.groovy
 create mode 100644 regression-test/suites/rollup_p0/link_schema_change/test_rollup_uni_fail.groovy




[doris] 03/06: [feature](multi-catalog) support oracle jdbc catalog (#15862)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git

commit ee329692fea3a01f055783087ed3c21eedf893f6
Author: Tiewei Fang <43...@users.noreply.github.com>
AuthorDate: Sat Jan 14 00:01:33 2023 +0800

    [feature](multi-catalog) support oracle jdbc catalog (#15862)
---
 .../docker-compose/oracle/init/01-drop-user.sql    | 18 ++++++
 .../docker-compose/oracle/init/02-create-user.sql  | 19 ++++++
 .../docker-compose/oracle/init/03-create-table.sql | 70 ++++++++++++++++++++
 .../docker-compose/oracle/init/04-insert.sql       | 48 ++++++++++++++
 .../docker-compose/oracle/oracle-11.env            | 19 ++++++
 .../docker-compose/oracle/oracle-11.yaml           | 49 ++++++++++++++
 docker/thirdparties/start-thirdparties-docker.sh   |  7 ++
 docker/thirdparties/stop-thirdparties-docker.sh    |  3 +
 .../docs/ecosystem/external-table/multi-catalog.md | 45 ++++++++++++-
 .../Create/CREATE-CATALOG.md                       | 28 +++++++-
 .../docs/ecosystem/external-table/multi-catalog.md | 45 ++++++++++++-
 .../Create/CREATE-CATALOG.md                       | 28 +++++++-
 .../org/apache/doris/catalog/JdbcResource.java     |  2 +
 .../org/apache/doris/external/jdbc/JdbcClient.java | 73 ++++++++++++++++++---
 regression-test/conf/regression-conf.groovy        |  1 +
 .../jdbc_catalog_p0/test_oracle_jdbc_catalog.out   | 33 ++++++++++
 .../test_oracle_jdbc_catalog.groovy                | 74 ++++++++++++++++++++++
 17 files changed, 543 insertions(+), 19 deletions(-)

diff --git a/docker/thirdparties/docker-compose/oracle/init/01-drop-user.sql b/docker/thirdparties/docker-compose/oracle/init/01-drop-user.sql
new file mode 100644
index 0000000000..a47a923f97
--- /dev/null
+++ b/docker/thirdparties/docker-compose/oracle/init/01-drop-user.sql
@@ -0,0 +1,18 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+drop user doris_test CASCADE;
diff --git a/docker/thirdparties/docker-compose/oracle/init/02-create-user.sql b/docker/thirdparties/docker-compose/oracle/init/02-create-user.sql
new file mode 100644
index 0000000000..dc59e57b78
--- /dev/null
+++ b/docker/thirdparties/docker-compose/oracle/init/02-create-user.sql
@@ -0,0 +1,19 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+create user doris_test identified by 123456;
+grant connect, resource to doris_test;
diff --git a/docker/thirdparties/docker-compose/oracle/init/03-create-table.sql b/docker/thirdparties/docker-compose/oracle/init/03-create-table.sql
new file mode 100644
index 0000000000..efd648dff6
--- /dev/null
+++ b/docker/thirdparties/docker-compose/oracle/init/03-create-table.sql
@@ -0,0 +1,70 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+create table doris_test.student (
+id number(5),
+name varchar2(20),
+age number(2),
+score number(3,1)
+);
+
+create table doris_test.test_num (
+id int,
+n1 number,
+n2 number(38),
+n3 number(9,2),
+n4 int,
+n5 smallint,
+n6 decimal(5,2),
+n7 float,
+n8 float(2),
+n9 real
+);
+
+create table doris_test.test_int (
+id int,
+tinyint_value1 number(2,0),
+smallint_value1 number(4,0),
+int_value1 number(9,0),
+bigint_value1 number(18,0),
+tinyint_value2 number(3,0),
+smallint_value2 number(5,0),
+int_value2 number(10,0),
+bigint_value2 number(19,0)
+);
+
+create table doris_test.test_char (
+id int,
+country char,
+city nchar(6),
+address varchar2(4000),
+name nvarchar2(6),
+remark long
+);
+
+create table doris_test.test_raw (
+id int,
+raw_value raw(20),
+long_raw_value long raw
+);
+
+create table doris_test.test_date (
+id int,
+t1 date,
+t2 interval year(3) to month,
+t3 interval day(3) to second(6)
+);
diff --git a/docker/thirdparties/docker-compose/oracle/init/04-insert.sql b/docker/thirdparties/docker-compose/oracle/init/04-insert.sql
new file mode 100644
index 0000000000..fd6ea2a57c
--- /dev/null
+++ b/docker/thirdparties/docker-compose/oracle/init/04-insert.sql
@@ -0,0 +1,48 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+insert into doris_test.student values (1, 'alice', 20, 99.5);
+insert into doris_test.student values (2, 'bob', 21, 90.5);
+insert into doris_test.student values (3, 'jerry', 23, 88.0);
+insert into doris_test.student values (4, 'andy', 21, 93);
+
+insert into doris_test.test_num values
+(1, 111, 123, 7456123.89, 573, 34, 673.43, 34.1264, 56.2, 23.231);
+
+insert into doris_test.test_int values
+(1, 99, 9999, 999999999, 999999999999999999, 999, 99999, 9999999999, 9999999999999999999);
+insert into doris_test.test_int values
+(2, -99, -9999, -999999999, -999999999999999999, -999, -99999, -9999999999, -9999999999999999999);
+insert into doris_test.test_int values
+(3, 9.9, 99.99, 999999999, 999999999999999999, 999, 99999, 9999999999, 9999999999999999999);
+
+
+insert into doris_test.test_char values (1, '1', 'china', 'beijing', 'alice', 'abcdefghrjkmnopq');
+insert into doris_test.test_char values (2, '2', 'china', 'shanghai', 'bob', 'abcdefghrjkmnopq');
+insert into doris_test.test_char values (3, '3', 'Americ', 'new york', 'Jerry', 'abcdefghrjkmnopq');
+
+
+insert into doris_test.test_raw values (1, hextoraw('ffff'), hextoraw('aaaa'));
+insert into doris_test.test_raw values (2, utl_raw.cast_to_raw('beijing'), utl_raw.cast_to_raw('shanghai'));
+
+insert into doris_test.test_date (id, t1) values (1, to_date('2022-1-21 5:23:01','yyyy-mm-dd hh24:mi:ss'));
+insert into doris_test.test_date (id, t1) values (2, to_date('20221112203256', 'yyyymmddhh24miss'));
+insert into doris_test.test_date (id, t2) values (3, interval '11' year);
+insert into doris_test.test_date (id, t2) values (4, interval '223-9' year(3) to month);
+insert into doris_test.test_date (id, t3) values (5, interval '12 10:23:01.1234568' day to second);
+
+commit;
diff --git a/docker/thirdparties/docker-compose/oracle/oracle-11.env b/docker/thirdparties/docker-compose/oracle/oracle-11.env
new file mode 100644
index 0000000000..f3d1f22efc
--- /dev/null
+++ b/docker/thirdparties/docker-compose/oracle/oracle-11.env
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+DOCKER_ORACLE_EXTERNAL_PORT=1521
diff --git a/docker/thirdparties/docker-compose/oracle/oracle-11.yaml b/docker/thirdparties/docker-compose/oracle/oracle-11.yaml
new file mode 100644
index 0000000000..93225aacd3
--- /dev/null
+++ b/docker/thirdparties/docker-compose/oracle/oracle-11.yaml
@@ -0,0 +1,49 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version: '3'
+
+services:
+  doris--oracle_11:
+    image: oracleinanutshell/oracle-xe-11g:latest
+    restart: always
+    ports:
+      - ${DOCKER_ORACLE_EXTERNAL_PORT}:1521
+    privileged: true
+    healthcheck:
+      test: [ "CMD", "bash", "-c", "echo 'select 1 from dual;' | ORACLE_HOME=/u01/app/oracle/product/11.2.0/xe /u01/app/oracle/product/11.2.0/xe/bin/sqlplus -s DORIS_TEST/123456@localhost"]
+      interval: 20s
+      timeout: 60s
+      retries: 120
+    volumes:
+      - ./init:/docker-entrypoint-initdb.d
+    environment:
+      - ORACLE_ALLOW_REMOTE=true
+      - ORACLE_ENABLE_XDB=true
+      - DBCA_TOTAL_MEMORY=2048
+      - IMPORT_FROM_VOLUME=true
+      - TZ=Asia/Shanghai
+    networks:
+      - doris--oracle_11
+  hello-world:
+    image: hello-world
+    depends_on:
+      doris--oracle_11:
+        condition: service_healthy 
+
+networks:
+  doris--oracle_11:
\ No newline at end of file
diff --git a/docker/thirdparties/start-thirdparties-docker.sh b/docker/thirdparties/start-thirdparties-docker.sh
index 986d04da7a..61e0df9713 100755
--- a/docker/thirdparties/start-thirdparties-docker.sh
+++ b/docker/thirdparties/start-thirdparties-docker.sh
@@ -56,6 +56,13 @@ sudo mkdir -p "${ROOT}"/docker-compose/postgresql/data/data
 sudo rm "${ROOT}"/docker-compose/postgresql/data/data/* -rf
 sudo docker compose -f "${ROOT}"/docker-compose/postgresql/postgresql-14.yaml --env-file "${ROOT}"/docker-compose/postgresql/postgresql-14.env up -d
 
+# oracle
+sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/oracle/oracle-11.yaml
+sudo docker compose -f "${ROOT}"/docker-compose/oracle/oracle-11.yaml --env-file "${ROOT}"/docker-compose/oracle/oracle-11.env down
+sudo mkdir -p "${ROOT}"/docker-compose/oracle/data/
+sudo rm "${ROOT}"/docker-compose/oracle/data/* -rf
+sudo docker compose -f "${ROOT}"/docker-compose/oracle/oracle-11.yaml --env-file "${ROOT}"/docker-compose/oracle/oracle-11.env up -d
+
 # hive
 # before start it, you need to download parquet file package, see "README" in "docker-compose/hive/scripts/"
 sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/hive/hive-2x.yaml
diff --git a/docker/thirdparties/stop-thirdparties-docker.sh b/docker/thirdparties/stop-thirdparties-docker.sh
index 4ed3e78c18..dc2c5773d8 100755
--- a/docker/thirdparties/stop-thirdparties-docker.sh
+++ b/docker/thirdparties/stop-thirdparties-docker.sh
@@ -33,5 +33,8 @@ sudo docker compose -f "${ROOT}"/docker-compose/mysql/mysql-5.7.yaml --env-file
 # pg 14
 sudo docker compose -f "${ROOT}"/docker-compose/postgresql/postgresql-14.yaml --env-file "${ROOT}"/docker-compose/postgresql/postgresql-14.env down
 
+# oracle 11
+sudo docker compose -f "${ROOT}"/docker-compose/oracle/oracle-11.yaml --env-file "${ROOT}"/docker-compose/oracle/oracle-11.env down
+
 # hive
 sudo docker compose -f "${ROOT}"/docker-compose/hive/hive-2x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive.env down
diff --git a/docs/en/docs/ecosystem/external-table/multi-catalog.md b/docs/en/docs/ecosystem/external-table/multi-catalog.md
index 0d0439ec2b..89ac6b22e3 100644
--- a/docs/en/docs/ecosystem/external-table/multi-catalog.md
+++ b/docs/en/docs/ecosystem/external-table/multi-catalog.md
@@ -430,7 +430,7 @@ CREATE CATALOG jdbc PROPERTIES (
 **CLICKHOUSE catalog example**
 
 ```sql
--- 1.2.0+ Version
+-- The first way
 CREATE RESOURCE clickhouse_resource PROPERTIES (
     "type"="jdbc",
     "user"="default",
@@ -441,7 +441,7 @@ CREATE RESOURCE clickhouse_resource PROPERTIES (
 )
 CREATE CATALOG jdbc WITH RESOURCE clickhouse_resource;
 
--- 1.2.0 Version
+-- The second way, note: keys have 'jdbc' prefix in front.
 CREATE CATALOG jdbc PROPERTIES (
     "type"="jdbc",
     "jdbc.jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
@@ -449,6 +449,31 @@ CREATE CATALOG jdbc PROPERTIES (
 )
 ```
 
+**oracle catalog example**
+
+```sql
+-- The first way
+CREATE RESOURCE oracle_resource PROPERTIES (
+    "type"="jdbc",
+    "user"="doris",
+    "password"="123456",
+    "jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
+    "driver_url" = "file:/path/to/ojdbc6.jar",
+    "driver_class" = "oracle.jdbc.driver.OracleDriver"
+);
+CREATE CATALOG jdbc WITH RESOURCE oracle_resource;
+
+-- The second way, note: keys have 'jdbc' prefix in front.
+CREATE CATALOG jdbc PROPERTIES (
+    "type"="jdbc",
+    "jdbc.user"="doris",
+    "jdbc.password"="123456",
+    "jdbc.jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
+    "jdbc.driver_url" = "file:/path/to/ojdbc6.jar",
+    "jdbc.driver_class" = "oracle.jdbc.driver.OracleDriver"
+);	
+```
+
 Where `jdbc.driver_url` can be a remote jar package
 
 ```sql
@@ -481,7 +506,9 @@ MySQL [(none)]> show catalogs;
 2 rows in set (0.02 sec)
 ```
 
-> Note: In the `postgresql catalog`, a database for doris corresponds to a schema in the postgresql specified catalog (specified in the `jdbc.jdbc_url` parameter), tables under this database corresponds to tables under this postgresql's schema.
+> Note: 
+> 1. In the `postgresql catalog`, a Doris database corresponds to a schema in the PostgreSQL catalog specified in the `jdbc.jdbc_url` parameter, and the tables under that database correspond to the tables under that schema.
+> 2. In the `oracle catalog`, a Doris database corresponds to an Oracle user, and the tables under that database correspond to the tables under that Oracle user.
 
 Switch to the jdbc catalog with the `SWITCH` command and view the databases in it:
 
@@ -764,6 +791,18 @@ For Hive/Iceberge/Hudi
 | DECIMAL                | DECIMAL    | Data that exceeds Doris's maximum Decimal precision is mapped to a STRING                                                            |
 | Enum/IPv4/IPv6/UUID    | STRING     | In the display of IPv4 and IPv6, an extra `/` is displayed before the data, which needs to be processed by the `split_part` function |
 
+#### ORACLE
+| ORACLE Type | Doris Type | Comment |
+|---|---|---|
+| number(p) / number(p,0) |  | Doris chooses the corresponding Doris type based on p: p < 3 -> TINYINT; p < 5 -> SMALLINT; p < 10 -> INT; p < 19 -> BIGINT; 19 <= p < 39 -> LARGEINT; p >= 39 -> STRING |
+| number(p,s) | DECIMAL | |
+| decimal | DECIMAL | |
+| float/real | DOUBLE | |
+| DATE | DATETIME | |
+| CHAR/NCHAR | CHAR | |
+| VARCHAR2/NVARCHAR2 | VARCHAR | |
+| LONG/ RAW/ LONG RAW/ INTERVAL | TEXT | |
+
 ## Privilege Management
 
 Using Doris to access the databases and tables in the External Catalog is not controlled by the permissions of the external data source itself, but relies on Doris's own permission access management.
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
index 33e9e4af9a..b659e3a7fc 100644
--- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
+++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
@@ -148,7 +148,7 @@ CREATE CATALOG catalog_name PROPERTIES (
 	**postgresql**
 
 	```sql
-	-- 1.2.0+ Version
+	-- The first way
 	CREATE RESOURCE pg_resource PROPERTIES (
 		"type"="jdbc",
 		"user"="postgres",
@@ -159,7 +159,7 @@ CREATE CATALOG catalog_name PROPERTIES (
 	);
 	CREATE CATALOG jdbc WITH RESOURCE pg_resource;
 
-	-- 1.2.0 Version
+	-- The second way, note: keys have 'jdbc' prefix in front.
 	CREATE CATALOG jdbc PROPERTIES (
 		"type"="jdbc",
 		"jdbc.user"="postgres",
@@ -192,6 +192,30 @@ CREATE CATALOG catalog_name PROPERTIES (
 	)
 	```
 
+	**oracle**
+	```sql
+	-- The first way
+	CREATE RESOURCE oracle_resource PROPERTIES (
+		"type"="jdbc",
+		"user"="doris",
+		"password"="123456",
+		"jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
+		"driver_url" = "file:/path/to/ojdbc6.jar",
+		"driver_class" = "oracle.jdbc.driver.OracleDriver"
+	);
+	CREATE CATALOG jdbc WITH RESOURCE oracle_resource;
+
+	-- The second way, note: keys have 'jdbc' prefix in front.
+	CREATE CATALOG jdbc PROPERTIES (
+		"type"="jdbc",
+		"jdbc.user"="doris",
+		"jdbc.password"="123456",
+		"jdbc.jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
+		"jdbc.driver_url" = "file:/path/to/ojdbc6.jar",
+		"jdbc.driver_class" = "oracle.jdbc.driver.OracleDriver"
+	);	
+	```
+
 ### Keywords
 
 CREATE, CATALOG
diff --git a/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md b/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
index 8192eea436..ab3974fe30 100644
--- a/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
+++ b/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
@@ -503,7 +503,7 @@ CREATE CATALOG jdbc PROPERTIES (
 **CLICKHOUSE catalog示例**
 
 ```sql
--- 1.2.0+ 版本
+-- 方式一
 CREATE RESOURCE clickhouse_resource PROPERTIES (
     "type"="jdbc",
     "user"="default",
@@ -514,7 +514,7 @@ CREATE RESOURCE clickhouse_resource PROPERTIES (
 )
 CREATE CATALOG jdbc WITH RESOURCE clickhouse_resource;
 
--- 1.2.0 版本
+-- 方式二,注意有jdbc前缀
 CREATE CATALOG jdbc PROPERTIES (
     "type"="jdbc",
     "jdbc.jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
@@ -522,6 +522,31 @@ CREATE CATALOG jdbc PROPERTIES (
 )
 ```
 
+**ORACLE catalog示例**
+
+```sql
+-- 方式一
+CREATE RESOURCE oracle_resource PROPERTIES (
+    "type"="jdbc",
+    "user"="doris",
+    "password"="123456",
+    "jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
+    "driver_url" = "file:/path/to/ojdbc6.jar",
+    "driver_class" = "oracle.jdbc.driver.OracleDriver"
+);
+CREATE CATALOG jdbc WITH RESOURCE oracle_resource;
+
+-- 方式二,注意有jdbc前缀
+CREATE CATALOG jdbc PROPERTIES (
+    "type"="jdbc",
+    "jdbc.user"="doris",
+    "jdbc.password"="123456",
+    "jdbc.jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
+    "jdbc.driver_url" = "file:/path/to/ojdbc6.jar",
+    "jdbc.driver_class" = "oracle.jdbc.driver.OracleDriver"
+);	
+```
+
 其中`jdbc.driver_url`可以是远程jar包:
 
 ```sql
@@ -576,7 +601,9 @@ MySQL [(none)]> show databases;
 9 rows in set (0.67 sec)
 ```
 
-> 注意:在postgresql catalog中,doris的一个database对应于postgresql中指定catalog(`jdbc.jdbc_url`参数中指定的catalog)下的一个schema,database下的tables则对应于postgresql该schema下的tables。
+> 注意:
+> 1. 在postgresql catalog中,doris的一个database对应于postgresql中指定catalog(`jdbc.jdbc_url`参数中指定的catalog)下的一个schema,database下的tables则对应于postgresql该schema下的tables。
+> 2. 在oracle catalog中,doris的一个database对应于oracle中的一个user,database下的tables则对应于oracle该user下的有权限访问的tables。
 
 查看`db1`数据库下的表,并查询:
 ```sql
@@ -763,6 +790,18 @@ select k1, k4 from table;           // Query OK.
 | DECIMAL                | DECIMAL    | 对于超过了Doris最大的Decimal精度的数据,将映射为STRING                |
 | Enum/IPv4/IPv6/UUID    | STRING     | 在显示上IPv4,IPv6会额外在数据最前面显示一个`/`,需要自己用`split_part`函数处理 |
 
+#### ORACLE
+ ORACLE Type | Doris Type | Comment |
+|---|---|---|
+| number(p) / number(p,0) |  | Doris会根据p的大小来选择对应的类型:p<3 -> TINYINT; p<5 -> SMALLINT; p<10 -> INT; p<19 -> BIGINT; p>19 -> LARGEINT |
+| number(p,s) | DECIMAL | |
+| decimal | DECIMAL | |
+| float/real | DOUBLE | |
+| DATE | DATETIME | |
+| CHAR/NCHAR | CHAR | |
+| VARCHAR2/NVARCHAR2 | VARCHAR | |
+| LONG/ RAW/ LONG RAW/ INTERVAL | TEXT | |
+
 ## 权限管理
 
 使用 Doris 对 External Catalog 中库表进行访问,并不受外部数据目录自身的权限控制,而是依赖 Doris 自身的权限访问管理功能。
diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
index 81ee7b30cb..9c71ee73a8 100644
--- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
+++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
@@ -154,7 +154,7 @@ CREATE CATALOG catalog_name PROPERTIES (
 	**postgresql**
 
 	```sql
-	-- 1.2.0+ 版本
+	-- 方式一
 	CREATE RESOURCE pg_resource PROPERTIES (
 		"type"="jdbc",
 		"user"="postgres",
@@ -165,7 +165,7 @@ CREATE CATALOG catalog_name PROPERTIES (
 	);
 	CREATE CATALOG jdbc WITH RESOURCE pg_resource;
 
-	-- 1.2.0 版本
+	-- 方式二,注意有jdbc前缀
 	CREATE CATALOG jdbc PROPERTIES (
 		"type"="jdbc",
 		"jdbc.user"="postgres",
@@ -198,6 +198,30 @@ CREATE CATALOG catalog_name PROPERTIES (
    )
    ```
 
+	**oracle**
+	```sql
+	-- 方式一
+	CREATE RESOURCE oracle_resource PROPERTIES (
+		"type"="jdbc",
+		"user"="doris",
+		"password"="123456",
+		"jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
+		"driver_url" = "file:/path/to/ojdbc6.jar",
+		"driver_class" = "oracle.jdbc.driver.OracleDriver"
+	);
+	CREATE CATALOG jdbc WITH RESOURCE oracle_resource;
+
+	-- 方式二,注意有jdbc前缀
+	CREATE CATALOG jdbc PROPERTIES (
+		"type"="jdbc",
+		"jdbc.user"="doris",
+		"jdbc.password"="123456",
+		"jdbc.jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin",
+		"jdbc.driver_url" = "file:/path/to/ojdbc6.jar",
+		"jdbc.driver_class" = "oracle.jdbc.driver.OracleDriver"
+	);	
+	```
+
 ### Keywords
 
 CREATE, CATALOG
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
index 3862a38e3b..add3009de8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
@@ -66,6 +66,7 @@ public class JdbcResource extends Resource {
     public static final String JDBC_ORACLE = "jdbc:oracle";
     public static final String JDBC_SQLSERVER = "jdbc:sqlserver";
     public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse";
+
     public static final String MYSQL = "MYSQL";
     public static final String POSTGRESQL = "POSTGRESQL";
     public static final String ORACLE = "ORACLE";
@@ -253,6 +254,7 @@ public class JdbcResource extends Resource {
             // it will convert to Doris tinyint, not bit.
             newJdbcUrl = checkJdbcUrlParam(newJdbcUrl, "yearIsDateType", "true", "false");
             newJdbcUrl = checkJdbcUrlParam(newJdbcUrl, "tinyInt1isBit", "true", "false");
+            newJdbcUrl = checkJdbcUrlParam(newJdbcUrl, "useCursorFetch", "false", "true");
         }
         return newJdbcUrl;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/jdbc/JdbcClient.java b/fe/fe-core/src/main/java/org/apache/doris/external/jdbc/JdbcClient.java
index fa10819a7b..cc8fc80b3d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/external/jdbc/JdbcClient.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/external/jdbc/JdbcClient.java
@@ -157,17 +157,18 @@ public class JdbcClient {
             stmt = conn.createStatement();
             switch (dbType) {
                 case JdbcResource.MYSQL:
+                case JdbcResource.CLICKHOUSE:
                     rs = stmt.executeQuery("SHOW DATABASES");
                     break;
                 case JdbcResource.POSTGRESQL:
                     rs = stmt.executeQuery("SELECT schema_name FROM information_schema.schemata "
                             + "where schema_owner='" + jdbcUser + "';");
                     break;
-                case JdbcResource.CLICKHOUSE:
-                    rs = stmt.executeQuery("SHOW DATABASES");
+                case JdbcResource.ORACLE:
+                    rs = stmt.executeQuery("SELECT DISTINCT OWNER FROM all_tables");
                     break;
                 default:
-                    throw  new JdbcClientException("Not supported jdbc type");
+                    throw new JdbcClientException("Not supported jdbc type");
             }
 
             while (rs.next()) {
@@ -196,8 +197,7 @@ public class JdbcClient {
                     rs = databaseMetaData.getTables(dbName, null, null, types);
                     break;
                 case JdbcResource.POSTGRESQL:
-                    rs = databaseMetaData.getTables(null, dbName, null, types);
-                    break;
+                case JdbcResource.ORACLE:
                 case JdbcResource.CLICKHOUSE:
                     rs = databaseMetaData.getTables(null, dbName, null, types);
                     break;
@@ -226,8 +226,7 @@ public class JdbcClient {
                     rs = databaseMetaData.getTables(dbName, null, tableName, types);
                     break;
                 case JdbcResource.POSTGRESQL:
-                    rs = databaseMetaData.getTables(null, dbName, null, types);
-                    break;
+                case JdbcResource.ORACLE:
                 case JdbcResource.CLICKHOUSE:
                     rs = databaseMetaData.getTables(null, dbName, null, types);
                     break;
@@ -297,8 +296,7 @@ public class JdbcClient {
                     rs = databaseMetaData.getColumns(dbName, null, tableName, null);
                     break;
                 case JdbcResource.POSTGRESQL:
-                    rs = databaseMetaData.getColumns(null, dbName, tableName, null);
-                    break;
+                case JdbcResource.ORACLE:
                 case JdbcResource.CLICKHOUSE:
                     rs = databaseMetaData.getColumns(null, dbName, tableName, null);
                     break;
@@ -334,6 +332,8 @@ public class JdbcClient {
                 return postgresqlTypeToDoris(fieldSchema);
             case JdbcResource.CLICKHOUSE:
                 return clickhouseTypeToDoris(fieldSchema);
+            case JdbcResource.ORACLE:
+                return oracleTypeToDoris(fieldSchema);
             default:
                 throw new JdbcClientException("Unknown database type");
         }
@@ -563,6 +563,61 @@ public class JdbcClient {
         // Todo(zyk): Wait the JDBC external table support the array type then supported clickhouse array type
     }
 
+    public Type oracleTypeToDoris(JdbcFieldSchema fieldSchema) {
+        String oracleType = fieldSchema.getDataTypeName();
+        if (oracleType.startsWith("INTERVAL")) {
+            oracleType = oracleType.substring(0, 8);
+        }
+        switch (oracleType) {
+            case "NUMBER":
+                int precision = fieldSchema.getColumnSize();
+                int scale = fieldSchema.getDecimalDigits();
+                if (scale == 0) {
+                    if (precision < 3) {
+                        return Type.TINYINT;
+                    } else if (precision < 5) {
+                        return Type.SMALLINT;
+                    } else if (precision < 10) {
+                        return Type.INT;
+                    } else if (precision < 19) {
+                        return Type.BIGINT;
+                    } else if (precision < 39) {
+                        return Type.LARGEINT;
+                    }
+                    return ScalarType.createStringType();
+                }
+                if (precision <= ScalarType.MAX_DECIMAL128_PRECISION) {
+                    if (!Config.enable_decimal_conversion && precision > ScalarType.MAX_DECIMALV2_PRECISION) {
+                        return ScalarType.createStringType();
+                    }
+                    return ScalarType.createDecimalType(precision, scale);
+                } else {
+                    return ScalarType.createStringType();
+                }
+            case "FLOAT":
+                return Type.DOUBLE;
+            case "DATE":
+                return ScalarType.getDefaultDateType(Type.DATETIME);
+            case "VARCHAR2":
+            case "NVARCHAR2":
+            case "CHAR":
+            case "NCHAR":
+            case "LONG":
+            case "RAW":
+            case "LONG RAW":
+            case "INTERVAL":
+                return ScalarType.createStringType();
+            case "BLOB":
+            case "CLOB":
+            case "NCLOB":
+            case "BFILE":
+            case "BINARY_FLOAT":
+            case "BINARY_DOUBLE":
+            default:
+                return Type.UNSUPPORTED;
+        }
+    }
+
     public List<Column> getColumnsFromJdbc(String dbName, String tableName) {
         List<JdbcFieldSchema> jdbcTableSchema = getJdbcColumnsInfo(dbName, tableName);
         List<Column> dorisTableSchema = Lists.newArrayListWithCapacity(jdbcTableSchema.size());
diff --git a/regression-test/conf/regression-conf.groovy b/regression-test/conf/regression-conf.groovy
index cc07c068c4..0b743fe9dd 100644
--- a/regression-test/conf/regression-conf.groovy
+++ b/regression-test/conf/regression-conf.groovy
@@ -75,6 +75,7 @@ sk=""
 enableJdbcTest=false
 mysql_57_port=3316
 pg_14_port=5442
+oracle_11_port=1521
 
 // hive catalog test config
 // To enable jdbc test, you need first start hive container.
diff --git a/regression-test/data/jdbc_catalog_p0/test_oracle_jdbc_catalog.out b/regression-test/data/jdbc_catalog_p0/test_oracle_jdbc_catalog.out
new file mode 100644
index 0000000000..0fe8aa929c
--- /dev/null
+++ b/regression-test/data/jdbc_catalog_p0/test_oracle_jdbc_catalog.out
@@ -0,0 +1,33 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !test0 --
+1	alice	20	99.5
+2	bob	21	90.5
+3	jerry	23	88.0
+4	andy	21	93.0
+
+-- !in_tb --
+1	alice	20
+2	bob	21
+3	jerry	23
+4	andy	21
+
+-- !test1 --
+1	111	123	7456123.89	573	34	673.43	34.1264	60.0	23.231
+
+-- !test2 --
+1	1	china 	beijing	alice	abcdefghrjkmnopq
+2	2	china 	shanghai	bob	abcdefghrjkmnopq
+3	3	Americ	new york	Jerry	abcdefghrjkmnopq
+
+-- !test3 --
+1	99	9999	999999999	999999999999999999	999	99999	9999999999	9999999999999999999
+2	-99	-9999	-999999999	-999999999999999999	-999	-99999	-9999999999	-9999999999999999999
+3	10	100	999999999	999999999999999999	999	99999	9999999999	9999999999999999999
+
+-- !test5 --
+1	2022-01-21T05:23:01	\N	\N
+2	2022-11-12T20:32:56	\N	\N
+3	\N	11-0	\N
+4	\N	223-9	\N
+5	\N	\N	12 10:23:1.123457000
+
diff --git a/regression-test/suites/jdbc_catalog_p0/test_oracle_jdbc_catalog.groovy b/regression-test/suites/jdbc_catalog_p0/test_oracle_jdbc_catalog.groovy
new file mode 100644
index 0000000000..9d473e7598
--- /dev/null
+++ b/regression-test/suites/jdbc_catalog_p0/test_oracle_jdbc_catalog.groovy
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_oracle_jdbc_catalog", "p0") {
+    String enabled = context.config.otherConfigs.get("enableJdbcTest");
+    if (enabled != null && enabled.equalsIgnoreCase("true")) {
+        String resource_name = "oracle_catalog_resource";
+        String catalog_name = "oracle_catalog";
+        String internal_db_name = "regression_test_jdbc_catalog_p0";
+        String ex_db_name = "DORIS_TEST";
+        String oracle_port = context.config.otherConfigs.get("oracle_11_port");
+        String SID = "XE"
+
+        String inDorisTable = "doris_in_tb";
+
+        sql """drop catalog if exists ${catalog_name} """
+        sql """drop resource if exists ${resource_name}"""
+
+        sql """create resource if not exists ${resource_name} properties(
+                    "type"="jdbc",
+                    "user"="doris_test",
+                    "password"="123456",
+                    "jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:${oracle_port}:${SID}",
+                    "driver_url" = "https://doris-community-test-1308700295.cos.ap-hongkong.myqcloud.com/jdbc_driver/ojdbc6.jar",
+                    "driver_class" = "oracle.jdbc.driver.OracleDriver"
+        );"""
+
+        sql """CREATE CATALOG ${catalog_name} WITH RESOURCE ${resource_name}"""
+
+        sql  """ drop table if exists ${inDorisTable} """
+        sql  """
+              CREATE TABLE ${inDorisTable} (
+                `id` INT NULL COMMENT "主键id",
+                `name` string NULL COMMENT "名字",
+                `age` INT NULL COMMENT "年龄"
+                ) DISTRIBUTED BY HASH(id) BUCKETS 10
+                PROPERTIES("replication_num" = "1");
+        """
+
+        sql """switch ${catalog_name}"""
+        sql """ use ${ex_db_name}"""
+
+        order_qt_test0  """ select * from STUDENT order by ID; """
+        sql  """ insert into internal.${internal_db_name}.${inDorisTable} select ID, NAME, AGE from STUDENT; """
+        order_qt_in_tb  """ select id, name, age from internal.${internal_db_name}.${inDorisTable} order by id; """
+
+        order_qt_test1  """ select * from TEST_NUM order by ID; """
+        order_qt_test2  """ select * from TEST_CHAR order by ID; """
+        order_qt_test3  """ select * from TEST_INT order by ID; """
+        order_qt_test5  """ select * from TEST_DATE order by ID; """
+
+        // The result of TEST_RAW will change
+        // So instead of qt, we're using sql here.
+        sql  """ select * from TEST_RAW order by ID; """
+
+
+        sql """drop catalog if exists ${catalog_name} """
+        sql """drop resource if exists jdbc_resource_catalog_pg"""
+    }
+}




[doris] 05/06: [Bugfix] (ROLLUP) fix the coredump when add rollup by link schema change (#15654)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 870d86690b8978a484359ff9e725bec717d75785
Author: Lightman <31...@users.noreply.github.com>
AuthorDate: Sat Jan 14 10:20:07 2023 +0800

    [Bugfix] (ROLLUP) fix the coredump when add rollup by link schema change (#15654)
    
    Because the rollup has the same key columns in the same order as the base table, BE performs a linked schema change: the base tablet's segments are linked directly into the new rollup tablet. However, column unique ids are assigned starting from 0 in the base tablet and again, independently, in the rollup tablet, so the same id can refer to different columns. In this case, unique id 4 is the column 'city' in the base table but 'cost' in the rollup tablet, and BE tries to decode a varchar page as a bigint page, which core dumps. Such a request needs to be rejected.
    
    Moreover, if a rollup can be added by linked schema change, the rollup is redundant: it brings no additional benefit and only wastes storage space. So it should be rejected.
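
For illustration, a minimal sketch of the rejected pattern (hypothetical table and rollup names; the regression tests added in this commit cover the same cases):

```sql
-- The rollup repeats all of the base table's key columns in the same order
-- and keeps only a subset of value columns, so before this patch BE would
-- link the base tablet's segments instead of rewriting them, and the
-- independently assigned column unique ids could point at different columns
-- in the two tablets (the 'city' vs. 'cost' mismatch described above).
CREATE TABLE demo_agg (
    `user_id` LARGEINT NOT NULL,
    `date`    DATE NOT NULL,
    `city`    VARCHAR(20),
    `age`     SMALLINT,
    `sex`     TINYINT,
    `cost`    BIGINT SUM DEFAULT "0",
    `max_dwell_time` INT MAX DEFAULT "0"
) AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`)
DISTRIBUTED BY HASH(`user_id`) BUCKETS 1
PROPERTIES ("replication_num" = "1", "light_schema_change" = "true");

-- Same keys, same order: after this patch the alter job is cancelled
-- instead of crashing BE.
ALTER TABLE demo_agg ADD ROLLUP rollup_cost(`user_id`, `date`, `city`, `age`, `sex`, `cost`);
```
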
---
 be/src/olap/schema_change.cpp                      | 18 ++++++
 .../org/apache/doris/task/AlterReplicaTask.java    | 11 ++++
 .../data/rollup_p0/test_rollup_agg_date.out        |  2 +-
 .../link_schema_change/test_rollup_agg_fail.groovy | 70 ++++++++++++++++++++++
 .../link_schema_change/test_rollup_dup_fail.groovy | 53 ++++++++++++++++
 .../link_schema_change/test_rollup_uni_fail.groovy | 53 ++++++++++++++++
 .../suites/rollup_p0/test_rollup_agg_date.groovy   |  2 +-
 .../test_agg_mv_schema_change.groovy               |  2 +-
 .../test_agg_rollup_schema_change.groovy           |  2 +-
 .../test_dup_mv_schema_change.groovy               |  2 +-
 .../test_dup_rollup_schema_change.groovy           |  2 +-
 .../test_uniq_mv_schema_change.groovy              |  2 +-
 .../test_uniq_rollup_schema_change.groovy          |  2 +-
 13 files changed, 213 insertions(+), 8 deletions(-)

diff --git a/be/src/olap/schema_change.cpp b/be/src/olap/schema_change.cpp
index 8af54320b2..55eb3e8203 100644
--- a/be/src/olap/schema_change.cpp
+++ b/be/src/olap/schema_change.cpp
@@ -1963,6 +1963,18 @@ Status SchemaChangeHandler::_do_process_alter_tablet_v2(const TAlterTabletReqV2&
         sc_params.ref_rowset_readers = rs_readers;
         sc_params.delete_handler = &delete_handler;
         sc_params.base_tablet_schema = base_tablet_schema;
+        DCHECK(request.__isset.alter_tablet_type);
+        switch (request.alter_tablet_type) {
+        case TAlterTabletType::SCHEMA_CHANGE:
+            sc_params.alter_tablet_type = AlterTabletType::SCHEMA_CHANGE;
+            break;
+        case TAlterTabletType::ROLLUP:
+            sc_params.alter_tablet_type = AlterTabletType::ROLLUP;
+            break;
+        case TAlterTabletType::MIGRATION:
+            sc_params.alter_tablet_type = AlterTabletType::MIGRATION;
+            break;
+        }
         if (request.__isset.materialized_view_params) {
             for (auto item : request.materialized_view_params) {
                 AlterMaterializedViewParam mv_param;
@@ -2172,6 +2184,12 @@ Status SchemaChangeHandler::_convert_historical_rowsets(const SchemaChangeParams
         return process_alter_exit();
     }
 
+    if (!sc_sorting && !sc_directly && sc_params.alter_tablet_type == AlterTabletType::ROLLUP) {
+        res = Status::Error<SCHEMA_SCHEMA_INVALID>(
+                "Don't support to add materialized view by linked schema change");
+        return process_alter_exit();
+    }
+
     // b. Generate historical data converter
     auto sc_procedure = get_sc_procedure(rb_changer, sc_sorting, sc_directly);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/AlterReplicaTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/AlterReplicaTask.java
index d9ac6f0bb4..191915833c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/task/AlterReplicaTask.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/task/AlterReplicaTask.java
@@ -24,6 +24,7 @@ import org.apache.doris.analysis.SlotRef;
 import org.apache.doris.catalog.Column;
 import org.apache.doris.thrift.TAlterMaterializedViewParam;
 import org.apache.doris.thrift.TAlterTabletReqV2;
+import org.apache.doris.thrift.TAlterTabletType;
 import org.apache.doris.thrift.TColumn;
 import org.apache.doris.thrift.TTaskType;
 
@@ -109,6 +110,16 @@ public class AlterReplicaTask extends AgentTask {
     public TAlterTabletReqV2 toThrift() {
         TAlterTabletReqV2 req = new TAlterTabletReqV2(baseTabletId, signature, baseSchemaHash, newSchemaHash);
         req.setAlterVersion(version);
+        switch (jobType) {
+            case ROLLUP:
+                req.setAlterTabletType(TAlterTabletType.ROLLUP);
+                break;
+            case SCHEMA_CHANGE:
+                req.setAlterTabletType(TAlterTabletType.SCHEMA_CHANGE);
+                break;
+            default:
+                break;
+        }
         if (defineExprs != null) {
             for (Map.Entry<String, Expr> entry : defineExprs.entrySet()) {
                 List<SlotRef> slots = Lists.newArrayList();
diff --git a/regression-test/data/rollup_p0/test_rollup_agg_date.out b/regression-test/data/rollup_p0/test_rollup_agg_date.out
index 887cd41a3c..c03adfe3f8 100644
--- a/regression-test/data/rollup_p0/test_rollup_agg_date.out
+++ b/regression-test/data/rollup_p0/test_rollup_agg_date.out
@@ -11,8 +11,8 @@ test_rollup_agg_date	AGG_KEYS	datek1	DATEV2	Yes	true	\N		true
 		datetimev4	DATETIMEV2(3)	Yes	false	\N	MAX	true
 								
 rollup_date	AGG_KEYS	datek1	DATEV2	Yes	true	\N		true
-		datetimek1	DATETIMEV2(0)	Yes	true	\N		true
 		datetimek2	DATETIMEV2(3)	Yes	true	\N		true
+		datetimek1	DATETIMEV2(0)	Yes	true	\N		true
 		datetimek3	DATETIMEV2(6)	Yes	true	\N		true
 		datev1	DATEV2	No	false	\N	MAX	true
 		datetimev1	DATETIMEV2(0)	No	false	\N	MAX	true
diff --git a/regression-test/suites/rollup_p0/link_schema_change/test_rollup_agg_fail.groovy b/regression-test/suites/rollup_p0/link_schema_change/test_rollup_agg_fail.groovy
new file mode 100644
index 0000000000..598b0dfed8
--- /dev/null
+++ b/regression-test/suites/rollup_p0/link_schema_change/test_rollup_agg_fail.groovy
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite ("test_rollup_add_fail") {
+    def tableName = "test_rollup_add_fail"
+
+    /* agg */
+    sql """ DROP TABLE IF EXISTS ${tableName} FORCE"""
+    sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "用户id",
+                `date` DATE NOT NULL COMMENT "数据灌入日期时间",
+                `city` VARCHAR(20) COMMENT "用户所在城市",
+                `age` SMALLINT COMMENT "用户年龄",
+                `sex` TINYINT COMMENT "用户性别",
+
+                `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费",
+                `max_dwell_time` INT MAX DEFAULT "0" COMMENT "用户最大停留时间",
+                `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "用户最小停留时间",
+                `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL列",
+                `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap列")
+            AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            BUCKETS 1
+            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+        """
+
+    // add materialized view (failed)
+    def result = "null"
+    def mvName = "mv1"
+    sql "create materialized view ${mvName} as select user_id, date, city, age, sex, sum(cost) from ${tableName} group by user_id, date, city, age, sex;"
+    while (!result.contains("CANCELLED")){
+        result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+        result = result.toString()
+        logger.info("result: ${result}")
+        if(result.contains("FINISHED")){
+            assertTrue(false);
+        }
+        Thread.sleep(100)
+    }
+
+    Thread.sleep(1000)
+
+    //add rollup (failed)
+    result = "null"
+    def rollupName = "rollup_cost"
+    sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`age`, `sex`, cost);"
+    while (!result.contains("CANCELLED")){
+        result = sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+        result = result.toString()
+        logger.info("result: ${result}")
+        if(result.contains("FINISHED")){
+            assertTrue(false);
+        }
+        Thread.sleep(100)
+    }
+}
diff --git a/regression-test/suites/rollup_p0/link_schema_change/test_rollup_dup_fail.groovy b/regression-test/suites/rollup_p0/link_schema_change/test_rollup_dup_fail.groovy
new file mode 100644
index 0000000000..a3a1c726b8
--- /dev/null
+++ b/regression-test/suites/rollup_p0/link_schema_change/test_rollup_dup_fail.groovy
@@ -0,0 +1,53 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite ("test_rollup_dup_fail") {
+    def tableName = "test_rollup_dup_fail"
+
+    sql """ DROP TABLE IF EXISTS ${tableName} FORCE"""
+    sql """
+        CREATE TABLE IF NOT EXISTS ${tableName} (
+            `user_id` LARGEINT NOT NULL COMMENT "用户id",
+            `date` DATE NOT NULL COMMENT "数据灌入日期时间",
+            `city` VARCHAR(20) COMMENT "用户所在城市",
+            `age` SMALLINT COMMENT "用户年龄",
+            `sex` TINYINT COMMENT "用户性别",
+            `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+            `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
+            `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+            `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
+            `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
+            `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
+        DUPLICATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+        BUCKETS 1
+        PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
+    """
+
+    //add rollup (failed)
+    result = "null"
+    rollupName = "rollup_cost"
+    sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`age`,`sex`) DUPLICATE KEY (`user_id`,`date`,`city`,`age`,`sex`);"
+    while (!result.contains("CANCELLED")){
+        result = sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+        result = result.toString()
+        logger.info("result: ${result}")
+        if(result.contains("FINISHED")){
+            assertTrue(false);
+        }
+        Thread.sleep(100)
+    }
+}
diff --git a/regression-test/suites/rollup_p0/link_schema_change/test_rollup_uni_fail.groovy b/regression-test/suites/rollup_p0/link_schema_change/test_rollup_uni_fail.groovy
new file mode 100644
index 0000000000..41f1443bff
--- /dev/null
+++ b/regression-test/suites/rollup_p0/link_schema_change/test_rollup_uni_fail.groovy
@@ -0,0 +1,53 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite ("test_rollup_uni_fail") {
+    def tableName = "test_rollup_uni_fail"
+
+    /* unique */
+    sql """ DROP TABLE IF EXISTS ${tableName} FORCE"""
+    sql """
+        CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "用户id",
+                `date` DATE NOT NULL COMMENT "数据灌入日期时间",
+                `age` SMALLINT COMMENT "用户年龄",
+                `sex` TINYINT COMMENT "用户性别",
+                `city` VARCHAR(20) DEFAULT "beijing "COMMENT "用户所在城市",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
+            UNIQUE KEY(`user_id`, `date`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            BUCKETS 1
+            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true");
+    """
+
+    result = "null"
+    rollupName = "rollup_cost"
+    sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`age`, `sex`, cost);"
+    while (!result.contains("CANCELLED")){
+        result = sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
+        result = result.toString()
+        logger.info("result: ${result}")
+        if(result.contains("FINISHED")){
+            assertTrue(false);
+        }
+        Thread.sleep(100)
+    }
+}
diff --git a/regression-test/suites/rollup_p0/test_rollup_agg_date.groovy b/regression-test/suites/rollup_p0/test_rollup_agg_date.groovy
index 40f1313c41..a65a03a97b 100644
--- a/regression-test/suites/rollup_p0/test_rollup_agg_date.groovy
+++ b/regression-test/suites/rollup_p0/test_rollup_agg_date.groovy
@@ -40,7 +40,7 @@ suite("test_rollup_agg_date", "rollup") {
             AGGREGATE KEY (datek1, datetimek1, datetimek2, datetimek3)
             DISTRIBUTED BY HASH(datek1) BUCKETS 5 properties("replication_num" = "1");
         """
-    sql """ALTER TABLE ${tbName} ADD ROLLUP rollup_date(datek1,datetimek1,datetimek2,datetimek3,datev1,datetimev1,datetimev2,datetimev3);"""
+    sql """ALTER TABLE ${tbName} ADD ROLLUP rollup_date(datek1,datetimek2,datetimek1,datetimek3,datev1,datetimev1,datetimev2,datetimev3);"""
     int max_try_secs = 60
     while (max_try_secs--) {
         String res = getJobRollupState(tbName)
diff --git a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
index 5c7f29b91b..5bcef9b8d3 100644
--- a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
@@ -79,7 +79,7 @@ suite ("test_agg_mv_schema_change") {
         //add materialized view
         def result = "null"
         def mvName = "mv1"
-        sql "create materialized view ${mvName} as select user_id, date, city, age, sex, sum(cost) from ${tableName} group by user_id, date, city, age, sex, cost;"
+        sql "create materialized view ${mvName} as select user_id, date, city, age, sum(cost) from ${tableName} group by user_id, date, city, age, sex;"
         while (!result.contains("FINISHED")){
             result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
diff --git a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
index 196f3a0071..acd9a82931 100644
--- a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
@@ -80,7 +80,7 @@ suite ("test_agg_rollup_schema_change") {
         //add rollup
         def result = "null"
         def rollupName = "rollup_cost"
-        sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`age`,`sex`, cost);"
+        sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`age`, cost);"
         while (!result.contains("FINISHED")){
             result = sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
diff --git a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
index d62415f67a..9e3c885a0c 100644
--- a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
@@ -79,7 +79,7 @@ suite ("test_dup_mv_schema_change") {
         //add materialized view
         def result = "null"
         def mvName = "mv1"
-        sql "create materialized view ${mvName} as select user_id, date, city, age,sex from ${tableName};"
+        sql "create materialized view ${mvName} as select user_id, date, city, age from ${tableName};"
         while (!result.contains("FINISHED")){
             result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
diff --git a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
index 67c4763b7e..94e6510555 100644
--- a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
@@ -78,7 +78,7 @@ suite ("test_dup_rollup_schema_change") {
         //add rollup
         def result = "null"
         def rollupName = "rollup_cost"
-        sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`age`,`sex`, cost);"
+        sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`age`, cost);"
         while (!result.contains("FINISHED")){
             result = sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
             result = result.toString()
diff --git a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
index 70d836111e..cad59ed449 100644
--- a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
@@ -78,7 +78,7 @@ suite ("test_uniq_mv_schema_change") {
     //add materialized view
     def result = "null"
     def mvName = "mv1"
-    sql "create materialized view ${mvName} as select user_id, date, city, age, sex from ${tableName} group by user_id, date, city, age, sex;"
+    sql "create materialized view ${mvName} as select user_id, date, city, age from ${tableName} group by user_id, date, city, age;"
     while (!result.contains("FINISHED")){
         result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
         result = result.toString()
diff --git a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
index fcbef30a85..ceef82590c 100644
--- a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
@@ -78,7 +78,7 @@ suite ("test_uniq_rollup_schema_change") {
     //add rollup
     def result = "null"
     def rollupName = "rollup_cost"
-    sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`age`,`sex`, cost);"
+    sql "ALTER TABLE ${tableName} ADD ROLLUP ${rollupName}(`user_id`,`date`,`city`,`sex`, `age`, cost);"
     while (!result.contains("FINISHED")){
         result = sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1;"
         result = result.toString()




[doris] 01/06: [improvement](multi-catalog) support hive 1.x (#15886)


morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git

commit d64fc25e0eb14ddd06465e7b3a5a797654f528d0
Author: Mingyu Chen <mo...@163.com>
AuthorDate: Fri Jan 13 18:32:12 2023 +0800

    [improvement](multi-catalog) support hive 1.x  (#15886)
    
    The interface of the Hive metastore changes from version to version.
    Currently, Doris uses Hive 2.3.7 as the HMS client version.
    When connecting to Hive 1.x, some interfaces, such as get_table_req, do not exist
    in Hive 1.x, so we cannot fetch metadata from Hive 1.x.
    
    In this PR, I copied HiveMetaStoreClient from the Hive 2.3.7 release and modified some of its
    method implementations, so that the old interfaces are used when connecting to Hive 1.x.
    
    When creating an HMS catalog, you can specify the Hive version, e.g.:
    
    CREATE CATALOG `hive` PROPERTIES (
      "hive.metastore.uris" = "thrift://127.0.0.1:9083",
      "type" = "hms",
      "hive.version" = "1.1"
    );
    If hive.version is not specified, Doris uses the Hive 2.3.x compatible interface to access the HMS.
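    
    For illustration only, the following is a minimal sketch of how a hive.version string such as
    "1.1" could be mapped to a coarse version bucket that gates which metastore interface is used.
    The class name, enum name, and parsing rules below are assumptions made for this example; they
    are not the HiveVersionUtil implementation added by this commit.
    
    // Hypothetical sketch; names and parsing rules are assumptions,
    // not the actual HiveVersionUtil shipped with this commit.
    public final class HiveVersionSketch {
    
        enum HiveVersion { V1, V2_3, DEFAULT }
    
        // Map a user-supplied "hive.version" property to a coarse version bucket.
        static HiveVersion parse(String hiveVersion) {
            if (hiveVersion == null || hiveVersion.trim().isEmpty()) {
                return HiveVersion.DEFAULT; // fall back to the 2.3.x compatible path
            }
            String major = hiveVersion.trim().split("\\.")[0];
            return "1".equals(major) ? HiveVersion.V1 : HiveVersion.V2_3;
        }
    
        public static void main(String[] args) {
            System.out.println(parse("1.1"));   // V1      -> e.g. use the legacy get_table(db, tbl)
            System.out.println(parse("2.3.7")); // V2_3    -> e.g. use get_table_req(...)
            System.out.println(parse(null));    // DEFAULT -> treated like 2.3.x
        }
    }
    
    In the committed code, the version string is read from the catalog property defined by
    HMSResource.HIVE_VERSION ("hive.version") and resolved via HiveVersionUtil.getVersion() in the
    HiveMetaStoreClient constructor shown further below.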
---
 .../java/org/apache/doris/catalog/HMSResource.java |    1 +
 .../doris/catalog/external/HMSExternalTable.java   |    2 +-
 .../doris/datasource/HMSClientException.java       |    5 +-
 .../doris/datasource/HMSExternalCatalog.java       |    1 +
 .../apache/doris/datasource/InternalCatalog.java   |    1 +
 .../doris/datasource/hive/HiveMetaStoreClient.java | 2758 ++++++++++++++++++++
 .../doris/datasource/hive/HiveVersionUtil.java     |   75 +
 .../{ => hive}/PooledHiveMetaStoreClient.java      |    4 +-
 fe/pom.xml                                         |    2 +-
 9 files changed, 2844 insertions(+), 5 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HMSResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HMSResource.java
index f21fa24961..6f096bc667 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HMSResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HMSResource.java
@@ -47,6 +47,7 @@ public class HMSResource extends Resource {
     private static final Logger LOG = LogManager.getLogger(HMSResource.class);
     public static final String HIVE_METASTORE_TYPE = "hive.metastore.type";
     public static final String DLF_TYPE = "dlf";
+    public static final String HIVE_VERSION = "hive.version";
     // required
     public static final String HIVE_METASTORE_URIS = "hive.metastore.uris";
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
index b5db5df42f..c0a1b54278 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java
@@ -21,7 +21,7 @@ import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.Type;
 import org.apache.doris.common.MetaNotFoundException;
 import org.apache.doris.datasource.HMSExternalCatalog;
-import org.apache.doris.datasource.PooledHiveMetaStoreClient;
+import org.apache.doris.datasource.hive.PooledHiveMetaStoreClient;
 import org.apache.doris.statistics.AnalysisTaskInfo;
 import org.apache.doris.statistics.AnalysisTaskScheduler;
 import org.apache.doris.statistics.BaseAnalysisTask;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSClientException.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSClientException.java
index fa2652b867..818967ce02 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSClientException.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSClientException.java
@@ -17,8 +17,11 @@
 
 package org.apache.doris.datasource;
 
+import org.apache.doris.common.util.Util;
+
 public class HMSClientException extends RuntimeException {
     public HMSClientException(String format, Throwable cause, Object... msg) {
-        super(String.format(format, msg), cause);
+        super(String.format(format, msg) + (cause == null ? "" : ". reason: " + Util.getRootCauseMessage(cause)),
+                cause);
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java
index 5556535b6c..5de9aec2eb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java
@@ -26,6 +26,7 @@ import org.apache.doris.catalog.HiveMetaStoreClientHelper;
 import org.apache.doris.catalog.external.ExternalDatabase;
 import org.apache.doris.catalog.external.HMSExternalDatabase;
 import org.apache.doris.common.Config;
+import org.apache.doris.datasource.hive.PooledHiveMetaStoreClient;
 import org.apache.doris.datasource.hive.event.MetastoreNotificationFetchException;
 
 import com.google.common.collect.Lists;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
index 0a264598d8..23f1945fdc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
@@ -137,6 +137,7 @@ import org.apache.doris.common.util.QueryableReentrantLock;
 import org.apache.doris.common.util.SqlParserUtils;
 import org.apache.doris.common.util.TimeUtils;
 import org.apache.doris.common.util.Util;
+import org.apache.doris.datasource.hive.PooledHiveMetaStoreClient;
 import org.apache.doris.external.elasticsearch.EsRepository;
 import org.apache.doris.external.hudi.HudiProperty;
 import org.apache.doris.external.hudi.HudiTable;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreClient.java
new file mode 100644
index 0000000000..d678f3db63
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreClient.java
@@ -0,0 +1,2758 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.hive;
+
+import org.apache.doris.catalog.HMSResource;
+import org.apache.doris.datasource.hive.HiveVersionUtil.HiveVersion;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hive.common.ObjectPair;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.auth.HiveAuthUtils;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public;
+import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.conf.HiveConfUtil;
+import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
+import org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreFilterHook;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.PartitionDropOptions;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
+import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
+import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest;
+import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
+import org.apache.hadoop.hive.metastore.api.AddPrimaryKeyRequest;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest;
+import org.apache.hadoop.hive.metastore.api.CacheFileMetadataResult;
+import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
+import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest;
+import org.apache.hadoop.hive.metastore.api.ClientCapabilities;
+import org.apache.hadoop.hive.metastore.api.ClientCapability;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionResponse;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FireEventRequest;
+import org.apache.hadoop.hive.metastore.api.FireEventResponse;
+import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
+import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest;
+import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult;
+import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest;
+import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
+import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
+import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
+import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
+import org.apache.hadoop.hive.metastore.api.GetTableRequest;
+import org.apache.hadoop.hive.metastore.api.GetTablesRequest;
+import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
+import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse;
+import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest;
+import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse;
+import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
+import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;
+import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
+import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnOpenException;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.UnlockRequest;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.shims.Utils;
+import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.thrift.TApplicationException;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import javax.security.auth.login.LoginException;
+
+/**
+ * Hive Metastore Client.
+ * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient
+ * are not public and can change. Hence this is marked as unstable.
+ * For users who require retry mechanism when the connection between metastore and client is
+ * broken, RetryingMetaStoreClient class should be used.
+ * <p>
+ * <p>
+ * Copy from
+ * https://github.com/apache/hive/blob/rel/release-2.3.7/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+ * Doris Modification.
+ * To support different type of hive, copy this file from hive repo and modify some method based on hive version
+ * 1. getSchema()
+ * 2. getTable()
+ * 3. tableExists()
+ */
+@Public
+@Unstable
+public class HiveMetaStoreClient implements IMetaStoreClient {
+    private static final org.apache.logging.log4j.Logger LOG = LogManager.getLogger(HiveMetaStoreClient.class);
+    /**
+     * Capabilities of the current client. If this client talks to a MetaStore server in a manner
+     * implying the usage of some expanded features that require client-side support that this client
+     * doesn't have (e.g. a getting a table of a new type), it will get back failures when the
+     * capability checking is enabled (the default).
+     */
+    public static final ClientCapabilities VERSION = null; // No capabilities.
+    public static final ClientCapabilities TEST_VERSION = new ClientCapabilities(
+            Lists.newArrayList(ClientCapability.TEST_CAPABILITY)); // Test capability for tests.
+
+    ThriftHiveMetastore.Iface client = null;
+    private TTransport transport = null;
+    private boolean isConnected = false;
+    private URI[] metastoreUris;
+    private final HiveMetaHookLoader hookLoader;
+    protected final HiveConf conf;
+    // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client.
+    protected boolean fastpath = false;
+    private String tokenStrForm;
+    private final boolean localMetaStore;
+    private final MetaStoreFilterHook filterHook;
+    private final int fileMetadataBatchSize;
+
+    private Map<String, String> currentMetaVars;
+
+    private static final AtomicInteger connCount = new AtomicInteger(0);
+
+    // for thrift connects
+    private int retries = 5;
+    private long retryDelaySeconds = 0;
+    private final ClientCapabilities version;
+    private final HiveVersion hiveVersion;
+
+    public HiveMetaStoreClient(HiveConf conf) throws MetaException {
+        this(conf, null, true);
+    }
+
+    public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) throws MetaException {
+        this(conf, hookLoader, true);
+    }
+
+    public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
+            throws MetaException {
+
+        this.hookLoader = hookLoader;
+        if (conf == null) {
+            conf = new HiveConf(HiveMetaStoreClient.class);
+            this.conf = conf;
+        } else {
+            this.conf = new HiveConf(conf);
+        }
+
+        hiveVersion = HiveVersionUtil.getVersion(conf.get(HMSResource.HIVE_VERSION));
+
+        version = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
+        filterHook = loadFilterHooks();
+        fileMetadataBatchSize = HiveConf.getIntVar(
+                conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX);
+
+        String msUri = conf.getVar(ConfVars.METASTOREURIS);
+        localMetaStore = HiveConfUtil.isEmbeddedMetaStore(msUri);
+        if (localMetaStore) {
+            if (!allowEmbedded) {
+                throw new MetaException("Embedded metastore is not allowed here. Please configure "
+                        + ConfVars.METASTOREURIS.varname + "; it is currently set to [" + msUri + "]");
+            }
+            // instantiate the metastore server handler directly instead of connecting
+            // through the network
+            if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
+                client = new HiveMetaStore.HMSHandler("hive client", this.conf, true);
+                fastpath = true;
+            } else {
+                client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+            }
+            isConnected = true;
+            snapshotActiveConf();
+            return;
+        } else {
+            if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
+                throw new RuntimeException("You can't set hive.metastore.fastpath to true when you're "
+                        + "talking to the thrift metastore service.  You must run the metastore locally.");
+            }
+        }
+
+        // get the number retries
+        retries = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES);
+        retryDelaySeconds = conf.getTimeVar(
+                ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
+
+        // user wants file store based configuration
+        if (conf.getVar(HiveConf.ConfVars.METASTOREURIS) != null) {
+            String[] metastoreUrisString = conf.getVar(
+                    HiveConf.ConfVars.METASTOREURIS).split(",");
+            metastoreUris = new URI[metastoreUrisString.length];
+            try {
+                int i = 0;
+                for (String s : metastoreUrisString) {
+                    URI tmpUri = new URI(s);
+                    if (tmpUri.getScheme() == null) {
+                        throw new IllegalArgumentException("URI: " + s
+                                + " does not have a scheme");
+                    }
+                    metastoreUris[i++] = tmpUri;
+
+                }
+                // make metastore URIS random
+                List<URI> uriList = Arrays.asList(metastoreUris);
+                Collections.shuffle(uriList);
+                metastoreUris = uriList.toArray(new URI[uriList.size()]);
+            } catch (IllegalArgumentException e) {
+                throw (e);
+            } catch (Exception e) {
+                throw new MetaException(e.getMessage());
+            }
+        } else {
+            LOG.error("NOT getting uris from conf");
+            throw new MetaException("MetaStoreURIs not found in conf file");
+        }
+
+        //If HADOOP_PROXY_USER is set in env or property,
+        //then need to create metastore client that proxies as that user.
+        String hadoopProxyUser = "HADOOP_PROXY_USER";
+        String proxyUser = System.getenv(hadoopProxyUser);
+        if (proxyUser == null) {
+            proxyUser = System.getProperty(hadoopProxyUser);
+        }
+        //if HADOOP_PROXY_USER is set, create DelegationToken using real user
+        if (proxyUser != null) {
+            LOG.info(hadoopProxyUser + " is set. Using delegation "
+                    + "token for HiveMetaStore connection.");
+            try {
+                UserGroupInformation.getLoginUser().getRealUser().doAs(
+                        new PrivilegedExceptionAction<Void>() {
+                            @Override
+                            public Void run() throws Exception {
+                                open();
+                                return null;
+                            }
+                        });
+                String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer";
+                String delegationTokenStr = getDelegationToken(proxyUser, proxyUser);
+                Utils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr,
+                        delegationTokenPropString);
+                this.conf.setVar(ConfVars.METASTORE_TOKEN_SIGNATURE, delegationTokenPropString);
+                close();
+            } catch (Exception e) {
+                LOG.error("Error while setting delegation token for " + proxyUser, e);
+                if (e instanceof MetaException) {
+                    throw (MetaException) e;
+                } else {
+                    throw new MetaException(e.getMessage());
+                }
+            }
+        }
+        // finally open the store
+        open();
+    }
+
+    private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+        Class<? extends MetaStoreFilterHook> authProviderClass = conf.getClass(
+                HiveConf.ConfVars.METASTORE_FILTER_HOOK.varname,
+                DefaultMetaStoreFilterHookImpl.class,
+                MetaStoreFilterHook.class);
+        String msg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+        try {
+            Constructor<? extends MetaStoreFilterHook> constructor =
+                    authProviderClass.getConstructor(HiveConf.class);
+            return constructor.newInstance(conf);
+        } catch (NoSuchMethodException e) {
+            throw new IllegalStateException(msg + e.getMessage(), e);
+        } catch (SecurityException e) {
+            throw new IllegalStateException(msg + e.getMessage(), e);
+        } catch (InstantiationException e) {
+            throw new IllegalStateException(msg + e.getMessage(), e);
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(msg + e.getMessage(), e);
+        } catch (IllegalArgumentException e) {
+            throw new IllegalStateException(msg + e.getMessage(), e);
+        } catch (InvocationTargetException e) {
+            throw new IllegalStateException(msg + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Swaps the first element of the metastoreUris array with a random element from the
+     * remainder of the array.
+     */
+    private void promoteRandomMetaStoreURI() {
+        if (metastoreUris.length <= 1) {
+            return;
+        }
+        Random rng = new Random();
+        int index = rng.nextInt(metastoreUris.length - 1) + 1;
+        URI tmp = metastoreUris[0];
+        metastoreUris[0] = metastoreUris[index];
+        metastoreUris[index] = tmp;
+    }
+
+    @VisibleForTesting
+    public TTransport getTTransport() {
+        return transport;
+    }
+
+    @Override
+    public boolean isLocalMetaStore() {
+        return localMetaStore;
+    }
+
+    @Override
+    public boolean isCompatibleWith(HiveConf conf) {
+        // Make a copy of currentMetaVars, there is a race condition that
+        // currentMetaVars might be changed during the execution of the method
+        Map<String, String> currentMetaVarsCopy = currentMetaVars;
+        if (currentMetaVarsCopy == null) {
+            return false; // recreate
+        }
+        boolean compatible = true;
+        for (ConfVars oneVar : HiveConf.metaVars) {
+            // Since metaVars are all of different types, use string for comparison
+            String oldVar = currentMetaVarsCopy.get(oneVar.varname);
+            String newVar = conf.get(oneVar.varname, "");
+            if (oldVar == null
+                    || (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+                LOG.info("Metastore configuration " + oneVar.varname
+                        + " changed from " + oldVar + " to " + newVar);
+                compatible = false;
+            }
+        }
+        return compatible;
+    }
+
+    @Override
+    public void setHiveAddedJars(String addedJars) {
+        HiveConf.setVar(conf, ConfVars.HIVEADDEDJARS, addedJars);
+    }
+
+    @Override
+    public void reconnect() throws MetaException {
+        if (localMetaStore) {
+            // For direct DB connections we don't yet support reestablishing connections.
+            throw new MetaException("For direct MetaStore DB connections, we don't support retries"
+                    + " at the client level.");
+        } else {
+            close();
+            // Swap the first element of the metastoreUris[] with a random element from the rest
+            // of the array. Rationale being that this method will generally be called when the default
+            // connection has died and the default connection is likely to be the first array element.
+            promoteRandomMetaStoreURI();
+            open();
+        }
+    }
+
+    /**
+     * @param dbname
+     * @param tblName
+     * @param newTbl
+     * @throws InvalidOperationException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
+     *java.lang.String, java.lang.String,
+     * org.apache.hadoop.hive.metastore.api.Table)
+     */
+    @Override
+    public void alter_table(String dbname, String tblName, Table newTbl)
+            throws InvalidOperationException, MetaException, TException {
+        alter_table_with_environmentContext(dbname, tblName, newTbl, null);
+    }
+
+    @Override
+    public void alter_table(String defaultDatabaseName, String tblName, Table table,
+            boolean cascade) throws InvalidOperationException, MetaException, TException {
+        EnvironmentContext environmentContext = new EnvironmentContext();
+        if (cascade) {
+            environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+        }
+        alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext);
+    }
+
+    @Override
+    public void alter_table_with_environmentContext(String dbname, String tblName, Table newTbl,
+            EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+        client.alter_table_with_environment_context(dbname, tblName, newTbl, envContext);
+    }
+
+    /**
+     * @param dbname
+     * @param name
+     * @param partVals
+     * @param newPart
+     * @throws InvalidOperationException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition(
+     *java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition)
+     */
+    @Override
+    public void renamePartition(final String dbname, final String name, final List<String> partVals,
+            final Partition newPart)
+            throws InvalidOperationException, MetaException, TException {
+        client.rename_partition(dbname, name, partVals, newPart);
+    }
+
+    private void open() throws MetaException {
+        isConnected = false;
+        TTransportException tte = null;
+        boolean useSSL = conf.getBoolVar(ConfVars.HIVE_METASTORE_USE_SSL);
+        boolean useSasl = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL);
+        boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
+        boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL);
+        int clientSocketTimeout = (int) conf.getTimeVar(
+                ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
+            for (URI store : metastoreUris) {
+                LOG.debug("Trying to connect to metastore with URI " + store);
+
+                try {
+                    if (useSasl) {
+                        // Wrap thrift connection with SASL for secure connection.
+                        try {
+                            HadoopThriftAuthBridge.Client authBridge =
+                                    ShimLoader.getHadoopThriftAuthBridge().createClient();
+
+                            // check if we should use delegation tokens to authenticate
+                            // the call below gets hold of the tokens if they are set up by hadoop
+                            // this should happen on the map/reduce tasks if the client added the
+                            // tokens into hadoop's credential store in the front end during job
+                            // submission.
+                            String tokenSig = conf.getVar(ConfVars.METASTORE_TOKEN_SIGNATURE);
+                            // tokenSig could be null
+                            tokenStrForm = Utils.getTokenStrForm(tokenSig);
+                            transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+
+                            if (tokenStrForm != null) {
+                                // authenticate using delegation tokens via the "DIGEST" mechanism
+                                transport = authBridge.createClientTransport(null, store.getHost(),
+                                        "DIGEST", tokenStrForm, transport,
+                                        MetaStoreUtils.getMetaStoreSaslProperties(conf));
+                            } else {
+                                String principalConfig =
+                                        conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL);
+                                transport = authBridge.createClientTransport(
+                                        principalConfig, store.getHost(), "KERBEROS", null,
+                                        transport, MetaStoreUtils.getMetaStoreSaslProperties(conf));
+                            }
+                        } catch (IOException ioe) {
+                            LOG.error("Couldn't create client transport", ioe);
+                            throw new MetaException(ioe.toString());
+                        }
+                    } else {
+                        if (useSSL) {
+                            try {
+                                String trustStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH).trim();
+                                if (trustStorePath.isEmpty()) {
+                                    throw new IllegalArgumentException(
+                                            ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH.varname
+                                                    + " Not configured for SSL connection");
+                                }
+                                String trustStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
+                                        HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD.varname);
+
+                                // Create an SSL socket and connect
+                                transport = HiveAuthUtils.getSSLSocket(store.getHost(), store.getPort(),
+                                        clientSocketTimeout, trustStorePath, trustStorePassword);
+                                LOG.info("Opened an SSL connection to metastore, current connections: "
+                                        + connCount.incrementAndGet());
+                            } catch (IOException e) {
+                                throw new IllegalArgumentException(e);
+                            } catch (TTransportException e) {
+                                tte = e;
+                                throw new MetaException(e.toString());
+                            }
+                        } else {
+                            transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+                        }
+
+                        if (useFramedTransport) {
+                            transport = new TFramedTransport(transport);
+                        }
+                    }
+
+                    final TProtocol protocol;
+                    if (useCompactProtocol) {
+                        protocol = new TCompactProtocol(transport);
+                    } else {
+                        protocol = new TBinaryProtocol(transport);
+                    }
+                    client = new ThriftHiveMetastore.Client(protocol);
+                    try {
+                        if (!transport.isOpen()) {
+                            transport.open();
+                            LOG.info("Opened a connection to metastore, current connections: "
+                                    + connCount.incrementAndGet());
+                        }
+                        isConnected = true;
+                    } catch (TTransportException e) {
+                        tte = e;
+                        if (LOG.isDebugEnabled()) {
+                            LOG.warn("Failed to connect to the MetaStore Server...", e);
+                        } else {
+                            // Don't print full exception trace if DEBUG is not on.
+                            LOG.warn("Failed to connect to the MetaStore Server...");
+                        }
+                    }
+
+                    if (isConnected && !useSasl && conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
+                        // Call set_ugi, only in unsecure mode.
+                        try {
+                            UserGroupInformation ugi = Utils.getUGI();
+                            client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
+                        } catch (LoginException e) {
+                            LOG.warn("Failed to do login. set_ugi() is not successful, "
+                                    + "Continuing without it.", e);
+                        } catch (IOException e) {
+                            LOG.warn("Failed to find ugi of client set_ugi() is not successful, "
+                                    + "Continuing without it.", e);
+                        } catch (TException e) {
+                            LOG.warn("set_ugi() not successful, Likely cause: new client talking to old server. "
+                                    + "Continuing without it.", e);
+                        }
+                    }
+                } catch (MetaException e) {
+                    LOG.error("Unable to connect to metastore with URI " + store
+                            + " in attempt " + attempt, e);
+                }
+                if (isConnected) {
+                    break;
+                }
+            }
+            // Wait before launching the next round of connection retries.
+            if (!isConnected && retryDelaySeconds > 0) {
+                try {
+                    LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt.");
+                    Thread.sleep(retryDelaySeconds * 1000);
+                } catch (InterruptedException ignore) {
+                    ignore.printStackTrace();
+                }
+            }
+        }
+
+        if (!isConnected) {
+            throw new MetaException("Could not connect to meta store using any of the URIs provided."
+                    + " Most recent failure: " + StringUtils.stringifyException(tte));
+        }
+
+        snapshotActiveConf();
+
+        LOG.info("Connected to metastore.");
+    }
+
+    private void snapshotActiveConf() {
+        currentMetaVars = new HashMap<String, String>(HiveConf.metaVars.length);
+        for (ConfVars oneVar : HiveConf.metaVars) {
+            currentMetaVars.put(oneVar.varname, conf.get(oneVar.varname, ""));
+        }
+    }
+
+    @Override
+    public String getTokenStrForm() throws IOException {
+        return tokenStrForm;
+    }
+
+    @Override
+    public void close() {
+        isConnected = false;
+        currentMetaVars = null;
+        try {
+            if (null != client) {
+                client.shutdown();
+            }
+        } catch (TException e) {
+            LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e);
+        }
+        // Transport would have got closed via client.shutdown(), so we dont need this, but
+        // just in case, we make this call.
+        if ((transport != null) && transport.isOpen()) {
+            transport.close();
+            LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet());
+        }
+    }
+
+    @Override
+    public void setMetaConf(String key, String value) throws TException {
+        client.setMetaConf(key, value);
+    }
+
+    @Override
+    public String getMetaConf(String key) throws TException {
+        return client.getMetaConf(key);
+    }
+
+    /**
+     * @param newPart
+     * @return the added partition
+     * @throws InvalidObjectException
+     * @throws AlreadyExistsException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition
+     * (org.apache.hadoop.hive.metastore.api.Partition)
+     */
+    @Override
+    public Partition add_partition(Partition newPart)
+            throws InvalidObjectException, AlreadyExistsException, MetaException,
+            TException {
+        return add_partition(newPart, null);
+    }
+
+    public Partition add_partition(Partition newPart, EnvironmentContext envContext)
+            throws InvalidObjectException, AlreadyExistsException, MetaException,
+            TException {
+        Partition p = client.add_partition_with_environment_context(newPart, envContext);
+        return fastpath ? p : deepCopy(p);
+    }
+
+    /**
+     * @param newParts
+     * @throws InvalidObjectException
+     * @throws AlreadyExistsException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+     */
+    @Override
+    public int add_partitions(List<Partition> newParts)
+            throws InvalidObjectException, AlreadyExistsException, MetaException,
+            TException {
+        return client.add_partitions(newParts);
+    }
+
+    @Override
+    public List<Partition> add_partitions(
+            List<Partition> parts, boolean ifNotExists, boolean needResults)
+            throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+        if (parts.isEmpty()) {
+            return needResults ? new ArrayList<Partition>() : null;
+        }
+        Partition part = parts.get(0);
+        AddPartitionsRequest req = new AddPartitionsRequest(
+                part.getDbName(), part.getTableName(), parts, ifNotExists);
+        req.setNeedResult(needResults);
+        AddPartitionsResult result = client.add_partitions_req(req);
+        return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+    }
+
+    @Override
+    public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+        return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+    }
+
+    /**
+     * @param tableName
+     * @param dbName
+     * @param partVals
+     * @return the appended partition
+     * @throws InvalidObjectException
+     * @throws AlreadyExistsException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
+     * java.lang.String, java.util.List)
+     */
+    @Override
+    public Partition appendPartition(String dbName, String tableName,
+            List<String> partVals) throws InvalidObjectException,
+            AlreadyExistsException, MetaException, TException {
+        return appendPartition(dbName, tableName, partVals, null);
+    }
+
+    public Partition appendPartition(String dbName, String tableName, List<String> partVals,
+            EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException,
+            MetaException, TException {
+        Partition p = client.append_partition_with_environment_context(dbName, tableName,
+                partVals, envContext);
+        return fastpath ? p : deepCopy(p);
+    }
+
+    @Override
+    public Partition appendPartition(String dbName, String tableName, String partName)
+            throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+        return appendPartition(dbName, tableName, partName, null);
+    }
+
+    public Partition appendPartition(String dbName, String tableName, String partName,
+            EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException,
+            MetaException, TException {
+        Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+                partName, envContext);
+        return fastpath ? p : deepCopy(p);
+    }
+
+    /**
+     * Exchange the partition between two tables
+     *
+     * @param partitionSpecs partitions specs of the parent partition to be exchanged
+     * @param destDb the db of the destination table
+     * @param destinationTableName the destination table name
+     * @ @return new partition after exchanging
+     */
+    @Override
+    public Partition exchange_partition(Map<String, String> partitionSpecs,
+            String sourceDb, String sourceTable, String destDb,
+            String destinationTableName) throws MetaException,
+            NoSuchObjectException, InvalidObjectException, TException {
+        return client.exchange_partition(partitionSpecs, sourceDb, sourceTable,
+                destDb, destinationTableName);
+    }
+
+    /**
+     * Exchange the partitions between two tables
+     *
+     * @param partitionSpecs partitions specs of the parent partition to be exchanged
+     * @param destDb the db of the destination table
+     * @param destinationTableName the destination table name
+     * @ @return new partitions after exchanging
+     */
+    @Override
+    public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+            String sourceDb, String sourceTable, String destDb,
+            String destinationTableName) throws MetaException,
+            NoSuchObjectException, InvalidObjectException, TException {
+        return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable,
+                destDb, destinationTableName);
+    }
+
+    @Override
+    public void validatePartitionNameCharacters(List<String> partVals)
+            throws TException, MetaException {
+        client.partition_name_has_valid_characters(partVals, true);
+    }
+
+    /**
+     * Create a new Database
+     *
+     * @param db
+     * @throws AlreadyExistsException
+     * @throws InvalidObjectException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
+     */
+    @Override
+    public void createDatabase(Database db)
+            throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+        client.create_database(db);
+    }
+
+    /**
+     * @param tbl
+     * @throws MetaException
+     * @throws NoSuchObjectException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table
+     * (org.apache.hadoop.hive.metastore.api.Table)
+     */
+    @Override
+    public void createTable(Table tbl) throws AlreadyExistsException,
+            InvalidObjectException, MetaException, NoSuchObjectException, TException {
+        createTable(tbl, null);
+    }
+
+    public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
+            InvalidObjectException, MetaException, NoSuchObjectException, TException {
+        HiveMetaHook hook = getHook(tbl);
+        if (hook != null) {
+            hook.preCreateTable(tbl);
+        }
+        boolean success = false;
+        try {
+            // Subclasses can override this step (for example, for temporary tables)
+            create_table_with_environment_context(tbl, envContext);
+            if (hook != null) {
+                hook.commitCreateTable(tbl);
+            }
+            success = true;
+        } finally {
+            if (!success && (hook != null)) {
+                try {
+                    hook.rollbackCreateTable(tbl);
+                } catch (Exception e) {
+                    LOG.error("Create rollback failed with", e);
+                }
+            }
+        }
+    }
+
+    @Override
+    public void createTableWithConstraints(Table tbl,
+            List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys)
+            throws AlreadyExistsException, InvalidObjectException,
+            MetaException, NoSuchObjectException, TException {
+        HiveMetaHook hook = getHook(tbl);
+        if (hook != null) {
+            hook.preCreateTable(tbl);
+        }
+        boolean success = false;
+        try {
+            // Subclasses can override this step (for example, for temporary tables)
+            client.create_table_with_constraints(tbl, primaryKeys, foreignKeys);
+            if (hook != null) {
+                hook.commitCreateTable(tbl);
+            }
+            success = true;
+        } finally {
+            if (!success && (hook != null)) {
+                hook.rollbackCreateTable(tbl);
+            }
+        }
+    }
+
+    @Override
+    public void dropConstraint(String dbName, String tableName, String constraintName) throws
+            NoSuchObjectException, MetaException, TException {
+        client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName));
+    }
+
+    @Override
+    public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws
+            NoSuchObjectException, MetaException, TException {
+        client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols));
+    }
+
+    @Override
+    public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws
+            NoSuchObjectException, MetaException, TException {
+        client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols));
+    }
+
+    /**
+     * @param type
+     * @return true or false
+     * @throws AlreadyExistsException
+     * @throws InvalidObjectException
+     * @throws MetaException
+     * @throws TException
+     * @see
+     * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(
+     * org.apache.hadoop.hive.metastore.api.Type)
+     */
+    public boolean createType(Type type) throws AlreadyExistsException,
+            InvalidObjectException, MetaException, TException {
+        return client.create_type(type);
+    }
+
+    /**
+     * @param name
+     * @throws NoSuchObjectException
+     * @throws InvalidOperationException
+     * @throws MetaException
+     * @throws TException
+     * @see
+     * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
+     */
+    @Override
+    public void dropDatabase(String name)
+            throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+        dropDatabase(name, true, false, false);
+    }
+
+    @Override
+    public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+            throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+        dropDatabase(name, deleteData, ignoreUnknownDb, false);
+    }
+
+    @Override
+    public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+            throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+        try {
+            getDatabase(name);
+        } catch (NoSuchObjectException e) {
+            if (!ignoreUnknownDb) {
+                throw e;
+            }
+            return;
+        }
+
+        if (cascade) {
+            List<String> tableList = getAllTables(name);
+            for (String table : tableList) {
+                try {
+                    // Subclasses can override this step (for example, for temporary tables)
+                    dropTable(name, table, deleteData, true);
+                } catch (UnsupportedOperationException e) {
+                    // Ignore Index tables, those will be dropped with parent tables
+                }
+            }
+        }
+        client.drop_database(name, deleteData, cascade);
+    }
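+    // Illustrative note: with cascade == true the method above first drops every table returned
+    // by getAllTables(name) (index tables are skipped and left to their parent tables) and only
+    // then issues drop_database; a plain drop_database is expected to be rejected by the
+    // metastore for a non-empty database when cascade is false.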
+
+    /**
+     * @param tblName
+     * @param dbName
+     * @param partVals
+     * @return true or false
+     * @throws NoSuchObjectException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+     * java.lang.String, java.util.List, boolean)
+     */
+    public boolean dropPartition(String dbName, String tblName,
+            List<String> partVals) throws NoSuchObjectException, MetaException,
+            TException {
+        return dropPartition(dbName, tblName, partVals, true, null);
+    }
+
+    public boolean dropPartition(String dbName, String tblName, List<String> partVals,
+            EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException {
+        return dropPartition(dbName, tblName, partVals, true, envContext);
+    }
+
+    @Override
+    public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+            throws NoSuchObjectException, MetaException, TException {
+        return dropPartition(dbName, tableName, partName, deleteData, null);
+    }
+
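+    // The "ifPurge" -> "TRUE" option built below asks the metastore to delete the underlying
+    // data directly instead of moving it to the filesystem trash; the drop-partition and
+    // drop-table paths in this class attach it through an EnvironmentContext whenever the
+    // caller requests a purge.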
+    private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() {
+        Map<String, String> warehouseOptions = new HashMap<String, String>();
+        warehouseOptions.put("ifPurge", "TRUE");
+        return new EnvironmentContext(warehouseOptions);
+    }
+
+    public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData,
+            EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException {
+        return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+                deleteData, envContext);
+    }
+
+    /**
+     * @param dbName
+     * @param tblName
+     * @param partVals
+     * @param deleteData delete the underlying data or just delete the table in metadata
+     * @return true or false
+     * @throws NoSuchObjectException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+     * java.lang.String, java.util.List, boolean)
+     */
+    @Override
+    public boolean dropPartition(String dbName, String tblName,
+            List<String> partVals, boolean deleteData) throws NoSuchObjectException,
+            MetaException, TException {
+        return dropPartition(dbName, tblName, partVals, deleteData, null);
+    }
+
+    @Override
+    public boolean dropPartition(String dbName, String tblName,
+            List<String> partVals, PartitionDropOptions options) throws TException {
+        return dropPartition(dbName, tblName, partVals, options.deleteData,
+                options.purgeData ? getEnvironmentContextWithIfPurgeSet() : null);
+    }
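+    // Illustrative usage with hypothetical values (given some partVals list): purge one
+    // partition without going through the trash directory:
+    //   hmsClient.dropPartition("demo_db", "demo_tbl", partVals,
+    //           PartitionDropOptions.instance().deleteData(true).purgeData(true));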
+
+    public boolean dropPartition(String dbName, String tblName, List<String> partVals,
+            boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+            MetaException, TException {
+        return client.drop_partition_with_environment_context(dbName, tblName, partVals, deleteData,
+                envContext);
+    }
+
+    @Override
+    public List<Partition> dropPartitions(String dbName, String tblName,
+            List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions options)
+            throws TException {
+        RequestPartsSpec rps = new RequestPartsSpec();
+        List<DropPartitionsExpr> exprs = new ArrayList<DropPartitionsExpr>(partExprs.size());
+        for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+            DropPartitionsExpr dpe = new DropPartitionsExpr();
+            dpe.setExpr(partExpr.getSecond());
+            dpe.setPartArchiveLevel(partExpr.getFirst());
+            exprs.add(dpe);
+        }
+        rps.setExprs(exprs);
+        DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+        req.setDeleteData(options.deleteData);
+        req.setNeedResult(options.returnResults);
+        req.setIfExists(options.ifExists);
+        if (options.purgeData) {
+            LOG.info("Dropped partitions will be purged!");
+            req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet());
+        }
+        return client.drop_partitions_req(req).getPartitions();
+    }
+
+    @Override
+    public List<Partition> dropPartitions(String dbName, String tblName,
+            List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+            boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
+
+        return dropPartitions(dbName, tblName, partExprs,
+                PartitionDropOptions.instance()
+                        .deleteData(deleteData)
+                        .ifExists(ifExists)
+                        .returnResults(needResult));
+
+    }
+
+    @Override
+    public List<Partition> dropPartitions(String dbName, String tblName,
+            List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+            boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+        // By default, we need the results from dropPartitions();
+        return dropPartitions(dbName, tblName, partExprs,
+                PartitionDropOptions.instance()
+                        .deleteData(deleteData)
+                        .ifExists(ifExists));
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+     */
+    @Override
+    public void dropTable(String dbname, String name, boolean deleteData,
+            boolean ignoreUnknownTab) throws MetaException, TException,
+            NoSuchObjectException, UnsupportedOperationException {
+        dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
+    }
+
+    /**
+     * Drop the table and choose whether to save the data in the trash.
+     *
+     * @param ifPurge completely purge the table (skipping trash) while removing
+     * data from warehouse
+     * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+     */
+    @Override
+    public void dropTable(String dbname, String name, boolean deleteData,
+            boolean ignoreUnknownTab, boolean ifPurge)
+            throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
+        // build a new EnvironmentContext with ifPurge
+        EnvironmentContext envContext = null;
+        if (ifPurge) {
+            Map<String, String> warehouseOptions = new HashMap<String, String>();
+            warehouseOptions.put("ifPurge", "TRUE");
+            envContext = new EnvironmentContext(warehouseOptions);
+        }
+        dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    @Deprecated
+    public void dropTable(String tableName, boolean deleteData)
+            throws MetaException, UnknownTableException, TException, NoSuchObjectException {
+        dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, deleteData, false, null);
+    }
+
+    /**
+     * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+     */
+    @Override
+    public void dropTable(String dbname, String name)
+            throws NoSuchObjectException, MetaException, TException {
+        dropTable(dbname, name, true, true, null);
+    }
+
+    /**
+     * Drop the table and choose whether to: delete the underlying table data;
+     * throw if the table doesn't exist; save the data in the trash.
+     *
+     * @param dbname
+     * @param name
+     * @param deleteData delete the underlying data or just delete the table in metadata
+     * @param ignoreUnknownTab don't throw if the requested table doesn't exist
+     * @param envContext for communicating with thrift
+     * @throws MetaException could not drop table properly
+     * @throws NoSuchObjectException the table wasn't found
+     * @throws TException a thrift communication error occurred
+     * @throws UnsupportedOperationException dropping an index table is not allowed
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+     * java.lang.String, boolean)
+     */
+    public void dropTable(String dbname, String name, boolean deleteData,
+            boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
+            NoSuchObjectException, UnsupportedOperationException {
+        Table tbl;
+        try {
+            tbl = getTable(dbname, name);
+        } catch (NoSuchObjectException e) {
+            if (!ignoreUnknownTab) {
+                throw e;
+            }
+            return;
+        }
+        if (MetaStoreUtils.isIndexTable(tbl)) {
+            throw new UnsupportedOperationException("Cannot drop index tables");
+        }
+        HiveMetaHook hook = getHook(tbl);
+        if (hook != null) {
+            hook.preDropTable(tbl);
+        }
+        boolean success = false;
+        try {
+            drop_table_with_environment_context(dbname, name, deleteData, envContext);
+            if (hook != null) {
+                hook.commitDropTable(tbl,
+                        deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge"))));
+            }
+            success = true;
+        } catch (NoSuchObjectException e) {
+            if (!ignoreUnknownTab) {
+                throw e;
+            }
+        } finally {
+            if (!success && (hook != null)) {
+                hook.rollbackDropTable(tbl);
+            }
+        }
+    }
+
+    /**
+     * @param type
+     * @return true if the type is dropped
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String)
+     */
+    public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException {
+        return client.drop_type(type);
+    }
+
+    /**
+     * @param name
+     * @return map of types
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
+     */
+    public Map<String, Type> getTypeAll(String name) throws MetaException,
+            TException {
+        Map<String, Type> result = null;
+        Map<String, Type> fromClient = client.get_type_all(name);
+        if (fromClient != null) {
+            result = new LinkedHashMap<String, Type>();
+            for (String key : fromClient.keySet()) {
+                result.put(key, deepCopy(fromClient.get(key)));
+            }
+        }
+        return result;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<String> getDatabases(String databasePattern)
+            throws MetaException {
+        try {
+            return filterHook.filterDatabases(client.get_databases(databasePattern));
+        } catch (Exception e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<String> getAllDatabases() throws MetaException {
+        try {
+            return filterHook.filterDatabases(client.get_all_databases());
+        } catch (Exception e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
+
+    /**
+     * @param tblName
+     * @param dbName
+     * @param maxParts
+     * @return list of partitions
+     * @throws NoSuchObjectException
+     * @throws MetaException
+     * @throws TException
+     */
+    @Override
+    public List<Partition> listPartitions(String dbName, String tblName,
+            short maxParts) throws NoSuchObjectException, MetaException, TException {
+        List<Partition> parts = client.get_partitions(dbName, tblName, maxParts);
+        return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+    }
+
+    @Override
+    public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+        return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+                client.get_partitions_pspec(dbName, tableName, maxParts)));
+    }
+
+    @Override
+    public List<Partition> listPartitions(String dbName, String tblName,
+            List<String> partVals, short maxParts)
+            throws NoSuchObjectException, MetaException, TException {
+        List<Partition> parts = client.get_partitions_ps(dbName, tblName, partVals, maxParts);
+        return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+    }
+
+    @Override
+    public List<Partition> listPartitionsWithAuthInfo(String dbName,
+            String tblName, short maxParts, String userName, List<String> groupNames)
+            throws NoSuchObjectException, MetaException, TException {
+        List<Partition> parts = client.get_partitions_with_auth(dbName, tblName, maxParts,
+                userName, groupNames);
+        return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+    }
+
+    @Override
+    public List<Partition> listPartitionsWithAuthInfo(String dbName,
+            String tblName, List<String> partVals, short maxParts,
+            String userName, List<String> groupNames) throws NoSuchObjectException,
+            MetaException, TException {
+        List<Partition> parts = client.get_partitions_ps_with_auth(dbName,
+                tblName, partVals, maxParts, userName, groupNames);
+        return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+    }
+
+    /**
+     * Get list of partitions matching specified filter
+     *
+     * @param dbName the database name
+     * @param tblName the table name
+     * @param filter the filter string,
+     * for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can
+     * be done only on string partition keys.
+     * @param maxParts the maximum number of partitions to return,
+     * all partitions are returned if -1 is passed
+     * @return list of partitions
+     * @throws MetaException
+     * @throws NoSuchObjectException
+     * @throws TException
+     */
+    @Override
+    public List<Partition> listPartitionsByFilter(String dbName, String tblName,
+            String filter, short maxParts) throws MetaException,
+            NoSuchObjectException, TException {
+        List<Partition> parts = client.get_partitions_by_filter(dbName, tblName, filter, maxParts);
+        return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+    }
+
+    @Override
+    public PartitionSpecProxy listPartitionSpecsByFilter(String dbName, String tblName,
+            String filter, int maxParts) throws MetaException,
+            NoSuchObjectException, TException {
+        return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+                client.get_part_specs_by_filter(dbName, tblName, filter, maxParts)));
+    }
+
+    @Override
+    public boolean listPartitionsByExpr(String dbName, String tblName, byte[] expr,
+            String defaultPartitionName, short maxParts, List<Partition> result)
+            throws TException {
+        assert result != null;
+        PartitionsByExprRequest req = new PartitionsByExprRequest(
+                dbName, tblName, ByteBuffer.wrap(expr));
+        if (defaultPartitionName != null) {
+            req.setDefaultPartitionName(defaultPartitionName);
+        }
+        if (maxParts >= 0) {
+            req.setMaxParts(maxParts);
+        }
+        PartitionsByExprResult r = null;
+        try {
+            r = client.get_partitions_by_expr(req);
+        } catch (TApplicationException te) {
+            // TODO: backward compat for Hive <= 0.12. Can be removed later.
+            if (te.getType() != TApplicationException.UNKNOWN_METHOD
+                    && te.getType() != TApplicationException.WRONG_METHOD_NAME) {
+                throw te;
+            }
+            throw new MetaException(
+                    "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
+        }
+        if (fastpath) {
+            result.addAll(r.getPartitions());
+        } else {
+            r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+            // TODO: in these methods, do we really need to deepcopy?
+            deepCopyPartitions(r.getPartitions(), result);
+        }
+        return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
+    }
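+    // Descriptive note: expr carries the caller's serialized partition-pruning expression.
+    // Metastores that predate get_partitions_by_expr (roughly Hive <= 0.12) answer with an
+    // UNKNOWN_METHOD/WRONG_METHOD_NAME TApplicationException, which is rewritten above into a
+    // MetaException so callers can fall back to filter- or name-based listing. A return value
+    // of true means the result may include partitions that did not match the expression.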
+
+    /**
+     * @param name
+     * @return the database
+     * @throws NoSuchObjectException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String)
+     */
+    @Override
+    public Database getDatabase(String name) throws NoSuchObjectException,
+            MetaException, TException {
+        Database d = client.get_database(name);
+        return fastpath ? d : deepCopy(filterHook.filterDatabase(d));
+    }
+
+    /**
+     * @param tblName
+     * @param dbName
+     * @param partVals
+     * @return the partition
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
+     * java.lang.String, java.util.List)
+     */
+    @Override
+    public Partition getPartition(String dbName, String tblName,
+            List<String> partVals) throws NoSuchObjectException, MetaException, TException {
+        Partition p = client.get_partition(dbName, tblName, partVals);
+        return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+    }
+
+    @Override
+    public List<Partition> getPartitionsByNames(String dbName, String tblName,
+            List<String> partNames) throws NoSuchObjectException, MetaException, TException {
+        List<Partition> parts = client.get_partitions_by_names(dbName, tblName, partNames);
+        return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+    }
+
+    @Override
+    public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+            throws MetaException, TException, NoSuchObjectException {
+        return client.get_partition_values(request);
+    }
+
+    @Override
+    public Partition getPartitionWithAuthInfo(String dbName, String tblName,
+            List<String> partVals, String userName, List<String> groupNames)
+            throws MetaException, UnknownTableException, NoSuchObjectException,
+            TException {
+        Partition p = client.get_partition_with_auth(dbName, tblName, partVals, userName,
+                groupNames);
+        return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+    }
+
+    /**
+     * @param name
+     * @param dbname
+     * @return the table
+     * @throws NoSuchObjectException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String,
+     * java.lang.String)
+     */
+    @Override
+    public Table getTable(String dbname, String name) throws MetaException,
+            TException, NoSuchObjectException {
+        Table t;
+        if (hiveVersion == HiveVersion.V1_0) {
+            t = client.get_table(dbname, name);
+        } else {
+            GetTableRequest req = new GetTableRequest(dbname, name);
+            req.setCapabilities(version);
+            t = client.get_table_req(req).getTable();
+        }
+        return fastpath ? t : deepCopy(filterHook.filterTable(t));
+    }
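+    // Note for the Hive 1.x support in this change: the newer get_table_req RPC (with its
+    // capabilities request field) is not part of the Hive 1.x thrift interface, which is why
+    // HiveVersion.V1_0 falls back to the legacy get_table(db, name) call above; tableExists()
+    // and getSchema() below branch the same way.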
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    @Deprecated
+    public Table getTable(String tableName) throws MetaException, TException,
+            NoSuchObjectException {
+        Table t = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+        return fastpath ? t : filterHook.filterTable(t);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+            throws MetaException, InvalidOperationException, UnknownDBException, TException {
+        GetTablesRequest req = new GetTablesRequest(dbName);
+        req.setTblNames(tableNames);
+        req.setCapabilities(version);
+        List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
+        return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs));
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+            throws MetaException, TException, InvalidOperationException, UnknownDBException {
+        return filterHook.filterTableNames(dbName,
+                client.get_table_names_by_filter(dbName, filter, maxTables));
+    }
+
+    /**
+     * @param name
+     * @return the type
+     * @throws MetaException
+     * @throws TException
+     * @throws NoSuchObjectException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String)
+     */
+    public Type getType(String name) throws NoSuchObjectException, MetaException, TException {
+        return deepCopy(client.get_type(name));
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+        try {
+            return filterHook.filterTableNames(dbname, client.get_tables(dbname, tablePattern));
+        } catch (Exception e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
+        try {
+            return filterHook.filterTableNames(dbname,
+                    client.get_tables_by_type(dbname, tablePattern, tableType.toString()));
+        } catch (Exception e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
+
+    @Override
+    public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+            throws MetaException {
+        try {
+            return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes));
+        } catch (Exception e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
+
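+    // Groups the returned TableMeta objects by database so the filter hook's filterTableNames()
+    // can be applied once per database, then keeps only the (db, table) entries whose names
+    // survive the filter.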
+    private List<TableMeta> filterNames(List<TableMeta> metas) throws MetaException {
+        Map<String, TableMeta> sources = new LinkedHashMap<>();
+        Map<String, List<String>> dbTables = new LinkedHashMap<>();
+        for (TableMeta meta : metas) {
+            sources.put(meta.getDbName() + "." + meta.getTableName(), meta);
+            List<String> tables = dbTables.get(meta.getDbName());
+            if (tables == null) {
+                dbTables.put(meta.getDbName(), tables = new ArrayList<String>());
+            }
+            tables.add(meta.getTableName());
+        }
+        List<TableMeta> filtered = new ArrayList<>();
+        for (Map.Entry<String, List<String>> entry : dbTables.entrySet()) {
+            for (String table : filterHook.filterTableNames(entry.getKey(), entry.getValue())) {
+                filtered.add(sources.get(entry.getKey() + "." + table));
+            }
+        }
+        return filtered;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<String> getAllTables(String dbname) throws MetaException {
+        try {
+            return filterHook.filterTableNames(dbname, client.get_all_tables(dbname));
+        } catch (Exception e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
+
+    @Override
+    public boolean tableExists(String databaseName, String tableName) throws MetaException,
+            TException, UnknownDBException {
+        try {
+            Table t;
+            if (hiveVersion == HiveVersion.V1_0) {
+                t = client.get_table(databaseName, tableName);
+            } else {
+                GetTableRequest req = new GetTableRequest(databaseName, tableName);
+                req.setCapabilities(version);
+                t = client.get_table_req(req).getTable();
+            }
+            return filterHook.filterTable(t) != null;
+        } catch (NoSuchObjectException e) {
+            return false;
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    @Deprecated
+    public boolean tableExists(String tableName) throws MetaException,
+            TException, UnknownDBException {
+        return tableExists(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+    }
+
+    @Override
+    public List<String> listPartitionNames(String dbName, String tblName,
+            short max) throws MetaException, TException {
+        return filterHook.filterPartitionNames(dbName, tblName,
+                client.get_partition_names(dbName, tblName, max));
+    }
+
+    @Override
+    public List<String> listPartitionNames(String dbName, String tblName,
+            List<String> partVals, short maxParts)
+            throws MetaException, TException, NoSuchObjectException {
+        return filterHook.filterPartitionNames(dbName, tblName,
+                client.get_partition_names_ps(dbName, tblName, partVals, maxParts));
+    }
+
+    /**
+     * Get number of partitions matching specified filter
+     *
+     * @param dbName the database name
+     * @param tblName the table name
+     * @param filter the filter string,
+     * for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can
+     * be done only on string partition keys.
+     * @return number of partitions
+     * @throws MetaException
+     * @throws NoSuchObjectException
+     * @throws TException
+     */
+    @Override
+    public int getNumPartitionsByFilter(String dbName, String tblName,
+            String filter) throws MetaException,
+            NoSuchObjectException, TException {
+        return client.get_num_partitions_by_filter(dbName, tblName, filter);
+    }
+
+    @Override
+    public void alter_partition(String dbName, String tblName, Partition newPart)
+            throws InvalidOperationException, MetaException, TException {
+        client.alter_partition_with_environment_context(dbName, tblName, newPart, null);
+    }
+
+    @Override
+    public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
+            throws InvalidOperationException, MetaException, TException {
+        client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext);
+    }
+
+    @Override
+    public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+            throws InvalidOperationException, MetaException, TException {
+        client.alter_partitions_with_environment_context(dbName, tblName, newParts, null);
+    }
+
+    @Override
+    public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+            EnvironmentContext environmentContext)
+            throws InvalidOperationException, MetaException, TException {
+        client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
+    }
+
+    @Override
+    public void alterDatabase(String dbName, Database db)
+            throws MetaException, NoSuchObjectException, TException {
+        client.alter_database(dbName, db);
+    }
+
+    /**
+     * @param db
+     * @param tableName
+     * @throws UnknownTableException
+     * @throws UnknownDBException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String,
+     * java.lang.String)
+     */
+    @Override
+    public List<FieldSchema> getFields(String db, String tableName)
+            throws MetaException, TException, UnknownTableException,
+            UnknownDBException {
+        List<FieldSchema> fields = client.get_fields(db, tableName);
+        return fastpath ? fields : deepCopyFieldSchemas(fields);
+    }
+
+    /**
+     * Create an index.
+     *
+     * @param index the index object
+     * @param indexTable the table which stores the index data
+     * @throws InvalidObjectException
+     * @throws MetaException
+     * @throws NoSuchObjectException
+     * @throws TException
+     * @throws AlreadyExistsException
+     */
+    @Override
+    public void createIndex(Index index, Table indexTable)
+            throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException {
+        client.add_index(index, indexTable);
+    }
+
+    /**
+     * @param dbname
+     * @param baseTblName
+     * @param idxName
+     * @param newIdx
+     * @throws InvalidOperationException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_index(java.lang.String,
+     * java.lang.String, java.lang.String, org.apache.hadoop.hive.metastore.api.Index)
+     */
+    @Override
+    public void alter_index(String dbname, String baseTblName, String idxName, Index newIdx)
+            throws InvalidOperationException, MetaException, TException {
+        client.alter_index(dbname, baseTblName, idxName, newIdx);
+    }
+
+    /**
+     * @param dbName
+     * @param tblName
+     * @param indexName
+     * @return the index
+     * @throws MetaException
+     * @throws UnknownTableException
+     * @throws NoSuchObjectException
+     * @throws TException
+     */
+    @Override
+    public Index getIndex(String dbName, String tblName, String indexName)
+            throws MetaException, UnknownTableException, NoSuchObjectException,
+            TException {
+        return deepCopy(filterHook.filterIndex(client.get_index_by_name(dbName, tblName, indexName)));
+    }
+
+    /**
+     * List the index names of the given base table.
+     *
+     * @param dbName
+     * @param tblName
+     * @param max
+     * @return the list of index names
+     * @throws NoSuchObjectException
+     * @throws MetaException
+     * @throws TException
+     */
+    @Override
+    public List<String> listIndexNames(String dbName, String tblName, short max)
+            throws MetaException, TException {
+        return filterHook.filterIndexNames(dbName, tblName, client.get_index_names(dbName, tblName, max));
+    }
+
+    /**
+     * List all the indexes of the given base table.
+     *
+     * @param dbName
+     * @param tblName
+     * @param max
+     * @return the list of indexes
+     * @throws MetaException
+     * @throws TException
+     */
+    @Override
+    public List<Index> listIndexes(String dbName, String tblName, short max)
+            throws NoSuchObjectException, MetaException, TException {
+        return filterHook.filterIndexes(client.get_indexes(dbName, tblName, max));
+    }
+
+    @Override
+    public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest req)
+            throws MetaException, NoSuchObjectException, TException {
+        return client.get_primary_keys(req).getPrimaryKeys();
+    }
+
+    @Override
+    public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest req) throws MetaException,
+            NoSuchObjectException, TException {
+        return client.get_foreign_keys(req).getForeignKeys();
+    }
+
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    @Deprecated
+    // use setPartitionColumnStatistics instead
+    public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+            throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+            InvalidInputException {
+        return client.update_table_column_statistics(statsObj);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    @Deprecated
+    // use setPartitionColumnStatistics instead
+    public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
+            throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+            InvalidInputException {
+        return client.update_partition_column_statistics(statsObj);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+            throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+            InvalidInputException {
+        return client.set_aggr_stats_for(request);
+    }
+
+    @Override
+    public void flushCache() {
+        try {
+            client.flushCache();
+        } catch (TException e) {
+            // Not much we can do about it honestly
+            LOG.warn("Got error flushing the cache", e);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+            List<String> colNames) throws NoSuchObjectException, MetaException, TException,
+            InvalidInputException, InvalidObjectException {
+        return client.get_table_statistics_req(
+                new TableStatsRequest(dbName, tableName, colNames)).getTableStats();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+            String dbName, String tableName, List<String> partNames, List<String> colNames)
+            throws NoSuchObjectException, MetaException, TException {
+        return client.get_partitions_statistics_req(
+                new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
+            String colName) throws NoSuchObjectException, InvalidObjectException, MetaException,
+            TException, InvalidInputException {
+        return client.delete_partition_column_statistics(dbName, tableName, partName, colName);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
+            throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+            InvalidInputException {
+        return client.delete_table_column_statistics(dbName, tableName, colName);
+    }
+
+    /**
+     * @param db
+     * @param tableName
+     * @throws UnknownTableException
+     * @throws UnknownDBException
+     * @throws MetaException
+     * @throws TException
+     * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String,
+     * java.lang.String)
+     */
+    @Override
+    public List<FieldSchema> getSchema(String db, String tableName)
+            throws MetaException, TException, UnknownTableException,
+            UnknownDBException {
+        List<FieldSchema> fields;
+        if (hiveVersion == HiveVersion.V1_0) {
+            fields = client.get_schema(db, tableName);
+        } else {
+            EnvironmentContext envCxt = null;
+            String addedJars = conf.getVar(ConfVars.HIVEADDEDJARS);
+            if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+                Map<String, String> props = new HashMap<String, String>();
+                props.put("hive.added.jars.path", addedJars);
+                envCxt = new EnvironmentContext(props);
+            }
+            fields = client.get_schema_with_environment_context(db, tableName, envCxt);
+        }
+        return fastpath ? fields : deepCopyFieldSchemas(fields);
+    }
+
+    @Override
+    public String getConfigValue(String name, String defaultValue)
+            throws TException, ConfigValSecurityException {
+        return client.get_config_value(name, defaultValue);
+    }
+
+    @Override
+    public Partition getPartition(String db, String tableName, String partName)
+            throws MetaException, TException, UnknownTableException, NoSuchObjectException {
+        Partition p = client.get_partition_by_name(db, tableName, partName);
+        return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+    }
+
+    public Partition appendPartitionByName(String dbName, String tableName, String partName)
+            throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+        return appendPartitionByName(dbName, tableName, partName, null);
+    }
+
+    public Partition appendPartitionByName(String dbName, String tableName, String partName,
+            EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException,
+            MetaException, TException {
+        Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+                partName, envContext);
+        return fastpath ? p : deepCopy(p);
+    }
+
+    public boolean dropPartitionByName(String dbName, String tableName, String partName,
+            boolean deleteData) throws NoSuchObjectException, MetaException, TException {
+        return dropPartitionByName(dbName, tableName, partName, deleteData, null);
+    }
+
+    public boolean dropPartitionByName(String dbName, String tableName, String partName,
+            boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+            MetaException, TException {
+        return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+                deleteData, envContext);
+    }
+
+    private HiveMetaHook getHook(Table tbl) throws MetaException {
+        if (hookLoader == null) {
+            return null;
+        }
+        return hookLoader.getHook(tbl);
+    }
+
+    @Override
+    public List<String> partitionNameToVals(String name) throws MetaException, TException {
+        return client.partition_name_to_vals(name);
+    }
+
+    @Override
+    public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException {
+        return client.partition_name_to_spec(name);
+    }
+
+    /**
+     * @param partition the partition to copy
+     * @return a deep copy of the given partition, or null if the input is null
+     */
+    private Partition deepCopy(Partition partition) {
+        Partition copy = null;
+        if (partition != null) {
+            copy = new Partition(partition);
+        }
+        return copy;
+    }
+
+    private Database deepCopy(Database database) {
+        Database copy = null;
+        if (database != null) {
+            copy = new Database(database);
+        }
+        return copy;
+    }
+
+    protected Table deepCopy(Table table) {
+        Table copy = null;
+        if (table != null) {
+            copy = new Table(table);
+        }
+        return copy;
+    }
+
+    private Index deepCopy(Index index) {
+        Index copy = null;
+        if (index != null) {
+            copy = new Index(index);
+        }
+        return copy;
+    }
+
+    private Type deepCopy(Type type) {
+        Type copy = null;
+        if (type != null) {
+            copy = new Type(type);
+        }
+        return copy;
+    }
+
+    private FieldSchema deepCopy(FieldSchema schema) {
+        FieldSchema copy = null;
+        if (schema != null) {
+            copy = new FieldSchema(schema);
+        }
+        return copy;
+    }
+
+    private Function deepCopy(Function func) {
+        Function copy = null;
+        if (func != null) {
+            copy = new Function(func);
+        }
+        return copy;
+    }
+
+    protected PrincipalPrivilegeSet deepCopy(PrincipalPrivilegeSet pps) {
+        PrincipalPrivilegeSet copy = null;
+        if (pps != null) {
+            copy = new PrincipalPrivilegeSet(pps);
+        }
+        return copy;
+    }
+
+    private List<Partition> deepCopyPartitions(List<Partition> partitions) {
+        return deepCopyPartitions(partitions, null);
+    }
+
+    private List<Partition> deepCopyPartitions(
+            Collection<Partition> src, List<Partition> dest) {
+        if (src == null) {
+            return dest;
+        }
+        if (dest == null) {
+            dest = new ArrayList<Partition>(src.size());
+        }
+        for (Partition part : src) {
+            dest.add(deepCopy(part));
+        }
+        return dest;
+    }
+
+    private List<Table> deepCopyTables(List<Table> tables) {
+        List<Table> copy = null;
+        if (tables != null) {
+            copy = new ArrayList<Table>();
+            for (Table tab : tables) {
+                copy.add(deepCopy(tab));
+            }
+        }
+        return copy;
+    }
+
+    protected List<FieldSchema> deepCopyFieldSchemas(List<FieldSchema> schemas) {
+        List<FieldSchema> copy = null;
+        if (schemas != null) {
+            copy = new ArrayList<FieldSchema>();
+            for (FieldSchema schema : schemas) {
+                copy.add(deepCopy(schema));
+            }
+        }
+        return copy;
+    }
+
+    @Override
+    public boolean dropIndex(String dbName, String tblName, String name,
+            boolean deleteData) throws NoSuchObjectException, MetaException,
+            TException {
+        return client.drop_index_by_name(dbName, tblName, name, deleteData);
+    }
+
+    @Override
+    public boolean grant_role(String roleName, String userName,
+            PrincipalType principalType, String grantor, PrincipalType grantorType,
+            boolean grantOption) throws MetaException, TException {
+        GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+        req.setRequestType(GrantRevokeType.GRANT);
+        req.setRoleName(roleName);
+        req.setPrincipalName(userName);
+        req.setPrincipalType(principalType);
+        req.setGrantor(grantor);
+        req.setGrantorType(grantorType);
+        req.setGrantOption(grantOption);
+        GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+        if (!res.isSetSuccess()) {
+            throw new MetaException("GrantRevokeResponse missing success field");
+        }
+        return res.isSuccess();
+    }
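+    // Illustrative usage with hypothetical names: granting a role maps onto a single
+    // GrantRevokeRoleRequest, as built by the method above, e.g.
+    //   hmsClient.grant_role("analyst", "alice", PrincipalType.USER,
+    //           "admin", PrincipalType.USER, false);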
+
+    @Override
+    public boolean create_role(Role role)
+            throws MetaException, TException {
+        return client.create_role(role);
+    }
+
+    @Override
+    public boolean drop_role(String roleName) throws MetaException, TException {
+        return client.drop_role(roleName);
+    }
+
+    @Override
+    public List<Role> list_roles(String principalName,
+            PrincipalType principalType) throws MetaException, TException {
+        return client.list_roles(principalName, principalType);
+    }
+
+    @Override
+    public List<String> listRoleNames() throws MetaException, TException {
+        return client.get_role_names();
+    }
+
+    @Override
+    public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest req)
+            throws MetaException, TException {
+        return client.get_principals_in_role(req);
+    }
+
+    @Override
+    public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
+            GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException {
+        return client.get_role_grants_for_principal(getRolePrincReq);
+    }
+
+    @Override
+    public boolean grant_privileges(PrivilegeBag privileges)
+            throws MetaException, TException {
+        GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+        req.setRequestType(GrantRevokeType.GRANT);
+        req.setPrivileges(privileges);
+        GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+        if (!res.isSetSuccess()) {
+            throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+        }
+        return res.isSuccess();
+    }
+
+    @Override
+    public boolean revoke_role(String roleName, String userName,
+            PrincipalType principalType, boolean grantOption) throws MetaException, TException {
+        GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+        req.setRequestType(GrantRevokeType.REVOKE);
+        req.setRoleName(roleName);
+        req.setPrincipalName(userName);
+        req.setPrincipalType(principalType);
+        req.setGrantOption(grantOption);
+        GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+        if (!res.isSetSuccess()) {
+            throw new MetaException("GrantRevokeResponse missing success field");
+        }
+        return res.isSuccess();
+    }
+
+    @Override
+    public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException,
+            TException {
+        GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+        req.setRequestType(GrantRevokeType.REVOKE);
+        req.setPrivileges(privileges);
+        req.setRevokeGrantOption(grantOption);
+        GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+        if (!res.isSetSuccess()) {
+            throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+        }
+        return res.isSuccess();
+    }
+
+    @Override
+    public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+            String userName, List<String> groupNames) throws MetaException,
+            TException {
+        return client.get_privilege_set(hiveObject, userName, groupNames);
+    }
+
+    @Override
+    public List<HiveObjectPrivilege> list_privileges(String principalName,
+            PrincipalType principalType, HiveObjectRef hiveObject)
+            throws MetaException, TException {
+        return client.list_privileges(principalName, principalType, hiveObject);
+    }
+
+    public String getDelegationToken(String renewerKerberosPrincipalName) throws
+            MetaException, TException, IOException {
+        // A convenience overload that uses the current user as the intended owner
+        // of the delegation token request.
+        String owner = conf.getUser();
+        return getDelegationToken(owner, renewerKerberosPrincipalName);
+    }
+
+    @Override
+    public String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws
+            MetaException, TException {
+        // This is expected to be a no-op, so we will return null when we use local metastore.
+        if (localMetaStore) {
+            return null;
+        }
+        return client.get_delegation_token(owner, renewerKerberosPrincipalName);
+    }
+
+    @Override
+    public long renewDelegationToken(String tokenStrForm) throws MetaException, TException {
+        if (localMetaStore) {
+            return 0;
+        }
+        return client.renew_delegation_token(tokenStrForm);
+    }
+
+    @Override
+    public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException {
+        if (localMetaStore) {
+            return;
+        }
+        client.cancel_delegation_token(tokenStrForm);
+    }
+
+    @Override
+    public boolean addToken(String tokenIdentifier, String delegationToken) throws TException {
+        return client.add_token(tokenIdentifier, delegationToken);
+    }
+
+    @Override
+    public boolean removeToken(String tokenIdentifier) throws TException {
+        return client.remove_token(tokenIdentifier);
+    }
+
+    @Override
+    public String getToken(String tokenIdentifier) throws TException {
+        return client.get_token(tokenIdentifier);
+    }
+
+    @Override
+    public List<String> getAllTokenIdentifiers() throws TException {
+        return client.get_all_token_identifiers();
+    }
+
+    @Override
+    public int addMasterKey(String key) throws MetaException, TException {
+        return client.add_master_key(key);
+    }
+
+    @Override
+    public void updateMasterKey(Integer seqNo, String key)
+            throws NoSuchObjectException, MetaException, TException {
+        client.update_master_key(seqNo, key);
+    }
+
+    @Override
+    public boolean removeMasterKey(Integer keySeq) throws TException {
+        return client.remove_master_key(keySeq);
+    }
+
+    @Override
+    public String[] getMasterKeys() throws TException {
+        List<String> keyList = client.get_master_keys();
+        return keyList.toArray(new String[keyList.size()]);
+    }
+
+    @Override
+    public ValidTxnList getValidTxns() throws TException {
+        return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0);
+    }
+
+    @Override
+    public ValidTxnList getValidTxns(long currentTxn) throws TException {
+        return TxnUtils.createValidReadTxnList(client.get_open_txns(), currentTxn);
+    }
+
+    @Override
+    public long openTxn(String user) throws TException {
+        OpenTxnsResponse txns = openTxns(user, 1);
+        return txns.getTxn_ids().get(0);
+    }
+
+    @Override
+    public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
+        String hostname = null;
+        try {
+            hostname = InetAddress.getLocalHost().getHostName();
+        } catch (UnknownHostException e) {
+            LOG.error("Unable to resolve my host name " + e.getMessage());
+            throw new RuntimeException(e);
+        }
+        return client.open_txns(new OpenTxnRequest(numTxns, user, hostname));
+    }
+
+    @Override
+    public void rollbackTxn(long txnid) throws NoSuchTxnException, TException {
+        client.abort_txn(new AbortTxnRequest(txnid));
+    }
+
+    @Override
+    public void commitTxn(long txnid)
+            throws NoSuchTxnException, TxnAbortedException, TException {
+        client.commit_txn(new CommitTxnRequest(txnid));
+    }
+
+    @Override
+    public GetOpenTxnsInfoResponse showTxns() throws TException {
+        return client.get_open_txns_info();
+    }
+
+    @Override
+    public void abortTxns(List<Long> txnids) throws NoSuchTxnException, TException {
+        client.abort_txns(new AbortTxnsRequest(txnids));
+    }
+
+    @Override
+    public LockResponse lock(LockRequest request)
+            throws NoSuchTxnException, TxnAbortedException, TException {
+        return client.lock(request);
+    }
+
+    @Override
+    public LockResponse checkLock(long lockid)
+            throws NoSuchTxnException, TxnAbortedException, NoSuchLockException,
+            TException {
+        return client.check_lock(new CheckLockRequest(lockid));
+    }
+
+    @Override
+    public void unlock(long lockid)
+            throws NoSuchLockException, TxnOpenException, TException {
+        client.unlock(new UnlockRequest(lockid));
+    }
+
+    @Override
+    @Deprecated
+    public ShowLocksResponse showLocks() throws TException {
+        return client.show_locks(new ShowLocksRequest());
+    }
+
+    @Override
+    public ShowLocksResponse showLocks(ShowLocksRequest request) throws TException {
+        return client.show_locks(request);
+    }
+
+    @Override
+    public void heartbeat(long txnid, long lockid)
+            throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
+            TException {
+        HeartbeatRequest hb = new HeartbeatRequest();
+        hb.setLockid(lockid);
+        hb.setTxnid(txnid);
+        client.heartbeat(hb);
+    }
+
+    @Override
+    public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max)
+            throws NoSuchTxnException, TxnAbortedException, TException {
+        HeartbeatTxnRangeRequest rqst = new HeartbeatTxnRangeRequest(min, max);
+        return client.heartbeat_txn_range(rqst);
+    }
+
+    @Override
+    @Deprecated
+    public void compact(String dbname, String tableName, String partitionName, CompactionType type)
+            throws TException {
+        CompactionRequest cr = new CompactionRequest();
+        if (dbname == null) {
+            cr.setDbname(MetaStoreUtils.DEFAULT_DATABASE_NAME);
+        } else {
+            cr.setDbname(dbname);
+        }
+        cr.setTablename(tableName);
+        if (partitionName != null) {
+            cr.setPartitionname(partitionName);
+        }
+        cr.setType(type);
+        client.compact(cr);
+    }
+
+    @Deprecated
+    @Override
+    public void compact(String dbname, String tableName, String partitionName, CompactionType type,
+            Map<String, String> tblproperties) throws TException {
+        compact2(dbname, tableName, partitionName, type, tblproperties);
+    }
+
+    @Override
+    public CompactionResponse compact2(String dbname, String tableName, String partitionName, CompactionType type,
+            Map<String, String> tblproperties) throws TException {
+        CompactionRequest cr = new CompactionRequest();
+        if (dbname == null) {
+            cr.setDbname(MetaStoreUtils.DEFAULT_DATABASE_NAME);
+        } else {
+            cr.setDbname(dbname);
+        }
+        cr.setTablename(tableName);
+        if (partitionName != null) {
+            cr.setPartitionname(partitionName);
+        }
+        cr.setType(type);
+        cr.setProperties(tblproperties);
+        return client.compact2(cr);
+    }
+
+    @Override
+    public ShowCompactResponse showCompactions() throws TException {
+        return client.show_compact(new ShowCompactRequest());
+    }
+
+    @Deprecated
+    @Override
+    public void addDynamicPartitions(long txnId, String dbName, String tableName,
+            List<String> partNames) throws TException {
+        client.add_dynamic_partitions(new AddDynamicPartitions(txnId, dbName, tableName, partNames));
+    }
+
+    @Override
+    public void addDynamicPartitions(long txnId, String dbName, String tableName,
+            List<String> partNames, DataOperationType operationType) throws TException {
+        AddDynamicPartitions adp = new AddDynamicPartitions(txnId, dbName, tableName, partNames);
+        adp.setOperationType(operationType);
+        client.add_dynamic_partitions(adp);
+    }
+
+    @Override
+    public void insertTable(Table table, boolean overwrite) throws MetaException {
+        boolean failed = true;
+        HiveMetaHook hook = getHook(table);
+        if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
+            return;
+        }
+        DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook;
+        try {
+            hiveMetaHook.commitInsertTable(table, overwrite);
+            failed = false;
+        } finally {
+            if (failed) {
+                hiveMetaHook.rollbackInsertTable(table, overwrite);
+            }
+        }
+    }
+
+    @InterfaceAudience.LimitedPrivate({"HCatalog"})
+    @Override
+    public NotificationEventResponse getNextNotification(long lastEventId, int maxEvents,
+            NotificationFilter filter) throws TException {
+        NotificationEventRequest rqst = new NotificationEventRequest(lastEventId);
+        rqst.setMaxEvents(maxEvents);
+        NotificationEventResponse rsp = client.get_next_notification(rqst);
+        LOG.debug("Got back " + rsp.getEventsSize() + " events");
+        if (filter == null) {
+            return rsp;
+        } else {
+            NotificationEventResponse filtered = new NotificationEventResponse();
+            if (rsp != null && rsp.getEvents() != null) {
+                for (NotificationEvent e : rsp.getEvents()) {
+                    if (filter.accept(e)) {
+                        filtered.addToEvents(e);
+                    }
+                }
+            }
+            return filtered;
+        }
+    }
+
+    @InterfaceAudience.LimitedPrivate({"HCatalog"})
+    @Override
+    public CurrentNotificationEventId getCurrentNotificationEventId() throws TException {
+        return client.get_current_notificationEventId();
+    }
+
+    @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"})
+    @Override
+    public FireEventResponse fireListenerEvent(FireEventRequest rqst) throws TException {
+        return client.fire_listener_event(rqst);
+    }
+
+    /**
+     * Creates a synchronized wrapper for any {@link IMetaStoreClient}.
+     * This may be used by multi-threaded applications until we have
+     * fixed all reentrancy bugs.
+     *
+     * @param client unsynchronized client
+     * @return synchronized client
+     */
+    public static IMetaStoreClient newSynchronizedClient(
+            IMetaStoreClient client) {
+        return (IMetaStoreClient) Proxy.newProxyInstance(
+                HiveMetaStoreClient.class.getClassLoader(),
+                new Class[] {IMetaStoreClient.class},
+                new SynchronizedHandler(client));
+    }
+
+    private static class SynchronizedHandler implements InvocationHandler {
+        private final IMetaStoreClient client;
+
+        SynchronizedHandler(IMetaStoreClient client) {
+            this.client = client;
+        }
+
+        @Override
+        public synchronized Object invoke(Object proxy, Method method, Object[] args)
+                throws Throwable {
+            try {
+                return method.invoke(client, args);
+            } catch (InvocationTargetException e) {
+                throw e.getTargetException();
+            }
+        }
+    }
+
+    @Override
+    public void markPartitionForEvent(String dbName, String tblName, Map<String, String> partKVs,
+            PartitionEventType eventType)
+            throws MetaException, TException, NoSuchObjectException, UnknownDBException,
+            UnknownTableException,
+            InvalidPartitionException, UnknownPartitionException {
+        assert dbName != null;
+        assert tblName != null;
+        assert partKVs != null;
+        client.markPartitionForEvent(dbName, tblName, partKVs, eventType);
+    }
+
+    @Override
+    public boolean isPartitionMarkedForEvent(String dbName, String tblName, Map<String, String> partKVs,
+            PartitionEventType eventType)
+            throws MetaException, NoSuchObjectException, UnknownTableException, UnknownDBException, TException,
+            InvalidPartitionException, UnknownPartitionException {
+        assert dbName != null;
+        assert tblName != null;
+        assert partKVs != null;
+        return client.isPartitionMarkedForEvent(dbName, tblName, partKVs, eventType);
+    }
+
+    @Override
+    public void createFunction(Function func) throws InvalidObjectException,
+            MetaException, TException {
+        client.create_function(func);
+    }
+
+    @Override
+    public void alterFunction(String dbName, String funcName, Function newFunction)
+            throws InvalidObjectException, MetaException, TException {
+        client.alter_function(dbName, funcName, newFunction);
+    }
+
+    @Override
+    public void dropFunction(String dbName, String funcName)
+            throws MetaException, NoSuchObjectException, InvalidObjectException,
+            InvalidInputException, TException {
+        client.drop_function(dbName, funcName);
+    }
+
+    @Override
+    public Function getFunction(String dbName, String funcName)
+            throws MetaException, TException {
+        Function f = client.get_function(dbName, funcName);
+        return fastpath ? f : deepCopy(f);
+    }
+
+    @Override
+    public List<String> getFunctions(String dbName, String pattern)
+            throws MetaException, TException {
+        return client.get_functions(dbName, pattern);
+    }
+
+    @Override
+    public GetAllFunctionsResponse getAllFunctions()
+            throws MetaException, TException {
+        return client.get_all_functions();
+    }
+
+    protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext)
+            throws AlreadyExistsException, InvalidObjectException,
+            MetaException, NoSuchObjectException, TException {
+        client.create_table_with_environment_context(tbl, envContext);
+    }
+
+    protected void drop_table_with_environment_context(String dbname, String name,
+            boolean deleteData, EnvironmentContext envContext) throws MetaException, TException,
+            NoSuchObjectException, UnsupportedOperationException {
+        client.drop_table_with_environment_context(dbname, name, deleteData, envContext);
+    }
+
+    @Override
+    public AggrStats getAggrColStatsFor(String dbName, String tblName,
+            List<String> colNames, List<String> partNames) throws NoSuchObjectException, MetaException, TException {
+        if (colNames.isEmpty() || partNames.isEmpty()) {
+            LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
+            return new AggrStats(new ArrayList<ColumnStatisticsObj>(), 0); // Nothing to aggregate
+        }
+        PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames);
+        return client.get_aggr_stats_for(req);
+    }
+
+    @Override
+    public Iterable<Entry<Long, ByteBuffer>> getFileMetadata(
+            final List<Long> fileIds) throws TException {
+        return new MetastoreMapIterable<Long, ByteBuffer>() {
+            private int listIndex = 0;
+
+            @Override
+            protected Map<Long, ByteBuffer> fetchNextBatch() throws TException {
+                if (listIndex == fileIds.size()) {
+                    return null;
+                }
+                int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size());
+                List<Long> subList = fileIds.subList(listIndex, endIndex);
+                GetFileMetadataResult resp = sendGetFileMetadataReq(subList);
+                // TODO: we could remember if it's unsupported and stop sending calls; although, it might
+                //       be a bad idea for HS2+standalone metastore that could be updated with support.
+                //       Maybe we should just remember this for some time.
+                if (!resp.isIsSupported()) {
+                    return null;
+                }
+                listIndex = endIndex;
+                return resp.getMetadata();
+            }
+        };
+    }
+
+    private GetFileMetadataResult sendGetFileMetadataReq(List<Long> fileIds) throws TException {
+        return client.get_file_metadata(new GetFileMetadataRequest(fileIds));
+    }
+
+    @Override
+    public Iterable<Entry<Long, MetadataPpdResult>> getFileMetadataBySarg(
+            final List<Long> fileIds, final ByteBuffer sarg, final boolean doGetFooters)
+            throws TException {
+        return new MetastoreMapIterable<Long, MetadataPpdResult>() {
+            private int listIndex = 0;
+
+            @Override
+            protected Map<Long, MetadataPpdResult> fetchNextBatch() throws TException {
+                if (listIndex == fileIds.size()) {
+                    return null;
+                }
+                int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size());
+                List<Long> subList = fileIds.subList(listIndex, endIndex);
+                GetFileMetadataByExprResult resp = sendGetFileMetadataBySargReq(
+                        sarg, subList, doGetFooters);
+                if (!resp.isIsSupported()) {
+                    return null;
+                }
+                listIndex = endIndex;
+                return resp.getMetadata();
+            }
+        };
+    }
+
+    private GetFileMetadataByExprResult sendGetFileMetadataBySargReq(
+            ByteBuffer sarg, List<Long> fileIds, boolean doGetFooters) throws TException {
+        GetFileMetadataByExprRequest req = new GetFileMetadataByExprRequest(fileIds, sarg);
+        req.setDoGetFooters(doGetFooters); // No need to get footers
+        return client.get_file_metadata_by_expr(req);
+    }
+
+    public abstract static class MetastoreMapIterable<K, V>
+            implements Iterable<Entry<K, V>>, Iterator<Entry<K, V>> {
+        private Iterator<Entry<K, V>> currentIter;
+
+        protected abstract Map<K, V> fetchNextBatch() throws TException;
+
+        @Override
+        public Iterator<Entry<K, V>> iterator() {
+            return this;
+        }
+
+        @Override
+        public boolean hasNext() {
+            ensureCurrentBatch();
+            return currentIter != null;
+        }
+
+        private void ensureCurrentBatch() {
+            if (currentIter != null && currentIter.hasNext()) {
+                return;
+            }
+            currentIter = null;
+            Map<K, V> currentBatch;
+            do {
+                try {
+                    currentBatch = fetchNextBatch();
+                } catch (TException ex) {
+                    throw new RuntimeException(ex);
+                }
+                if (currentBatch == null) {
+                    return; // No more data.
+                }
+            } while (currentBatch.isEmpty());
+            currentIter = currentBatch.entrySet().iterator();
+        }
+
+        @Override
+        public Entry<K, V> next() {
+            ensureCurrentBatch();
+            if (currentIter == null) {
+                throw new NoSuchElementException();
+            }
+            return currentIter.next();
+        }
+
+        @Override
+        public void remove() {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    @Override
+    public void clearFileMetadata(List<Long> fileIds) throws TException {
+        ClearFileMetadataRequest req = new ClearFileMetadataRequest();
+        req.setFileIds(fileIds);
+        client.clear_file_metadata(req);
+    }
+
+    @Override
+    public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) throws TException {
+        PutFileMetadataRequest req = new PutFileMetadataRequest();
+        req.setFileIds(fileIds);
+        req.setMetadata(metadata);
+        client.put_file_metadata(req);
+    }
+
+    @Override
+    public boolean isSameConfObj(HiveConf c) {
+        return conf == c;
+    }
+
+    @Override
+    public boolean cacheFileMetadata(
+            String dbName, String tableName, String partName, boolean allParts) throws TException {
+        CacheFileMetadataRequest req = new CacheFileMetadataRequest();
+        req.setDbName(dbName);
+        req.setTblName(tableName);
+        if (partName != null) {
+            req.setPartName(partName);
+        } else {
+            req.setIsAllParts(allParts);
+        }
+        CacheFileMetadataResult result = client.cache_file_metadata(req);
+        return result.isIsSupported();
+    }
+}
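
The vendored client above exposes newSynchronizedClient(), which wraps any IMetaStoreClient in a dynamic proxy whose InvocationHandler serializes all calls and rethrows the real cause of an InvocationTargetException. The following is a minimal, self-contained sketch of that same Proxy + synchronized-handler pattern; the Counter interface is a hypothetical stand-in for IMetaStoreClient so the example compiles and runs on its own.

```java
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

public class SynchronizedProxyDemo {
    // Hypothetical interface standing in for IMetaStoreClient.
    interface Counter {
        int incrementAndGet() throws Exception;
    }

    // Mirrors SynchronizedHandler above: every call goes through one synchronized invoke(),
    // and the real cause is rethrown instead of the wrapping InvocationTargetException.
    static class SynchronizedHandler implements InvocationHandler {
        private final Object delegate;

        SynchronizedHandler(Object delegate) {
            this.delegate = delegate;
        }

        @Override
        public synchronized Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
            try {
                return method.invoke(delegate, args);
            } catch (InvocationTargetException e) {
                throw e.getTargetException();
            }
        }
    }

    static Counter newSynchronizedCounter(Counter unsynchronized) {
        return (Counter) Proxy.newProxyInstance(
                SynchronizedProxyDemo.class.getClassLoader(),
                new Class<?>[] {Counter.class},
                new SynchronizedHandler(unsynchronized));
    }

    public static void main(String[] args) throws Exception {
        Counter raw = new Counter() {
            private int value = 0;

            @Override
            public int incrementAndGet() {
                return ++value; // not thread-safe on its own
            }
        };
        Counter safe = newSynchronizedCounter(raw);
        System.out.println(safe.incrementAndGet()); // 1
        System.out.println(safe.incrementAndGet()); // 2
    }
}
```

Coarse per-call locking like this trades throughput for safety, which is why the javadoc above presents it as a stopgap until the client itself is fully reentrant.
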
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveVersionUtil.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveVersionUtil.java
new file mode 100644
index 0000000000..749520b62c
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveVersionUtil.java
@@ -0,0 +1,75 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.hive;
+
+import com.google.common.base.Strings;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+/**
+ * Utility for resolving a compatible Hive version.
+ * If the user specifies a version, it is parsed and the compatible HiveVersion is returned;
+ * otherwise DEFAULT_HIVE_VERSION is used.
+ */
+public class HiveVersionUtil {
+    private static final Logger LOG = LogManager.getLogger(HiveVersionUtil.class);
+
+    private static final HiveVersion DEFAULT_HIVE_VERSION = HiveVersion.V2_3;
+
+    public enum HiveVersion {
+        V1_0,   // [1.0.0 - 1.2.2]
+        V2_0,   // [2.0.0 - 2.2.0]
+        V2_3,   // [2.3.0 - 2.3.6]
+        V3_0    // [3.0.0 - 3.1.2]
+    }
+
+    public static HiveVersion getVersion(String version) {
+        if (Strings.isNullOrEmpty(version)) {
+            return DEFAULT_HIVE_VERSION;
+        }
+        String[] parts = version.split("\\.");
+        if (parts.length < 2) {
+            LOG.warn("invalid hive version: " + version);
+            return DEFAULT_HIVE_VERSION;
+        }
+        try {
+            int major = Integer.parseInt(parts[0]);
+            int minor = Integer.parseInt(parts[1]);
+            if (major == 1) {
+                return HiveVersion.V1_0;
+            } else if (major == 2) {
+                if (minor >= 0 && minor <= 2) {
+                    return HiveVersion.V1_0;
+                } else if (minor >= 3) {
+                    return HiveVersion.V2_3;
+                } else {
+                    LOG.warn("invalid hive version: " + version);
+                    return DEFAULT_HIVE_VERSION;
+                }
+            } else if (major >= 3) {
+                return HiveVersion.V2_3;
+            } else {
+                LOG.warn("invalid hive version: " + version);
+                return DEFAULT_HIVE_VERSION;
+            }
+        } catch (NumberFormatException e) {
+            LOG.warn("invalid hive version: " + version);
+            return DEFAULT_HIVE_VERSION;
+        }
+    }
+}
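
Assuming HiveVersionUtil is on the classpath, the sketch below traces what getVersion() returns for a few inputs, following the parsing logic as committed: 1.x and 2.0-2.2 resolve to V1_0, 2.3+ and 3.x resolve to V2_3, and a missing or unparseable version falls back to DEFAULT_HIVE_VERSION (V2_3) after a warning. Note that the V2_0 and V3_0 constants are declared but not returned by this parser.

```java
import org.apache.doris.datasource.hive.HiveVersionUtil;

public class HiveVersionUtilDemo {
    public static void main(String[] args) {
        // Expected results follow the parsing logic committed above.
        System.out.println(HiveVersionUtil.getVersion("1.1.0"));          // V1_0
        System.out.println(HiveVersionUtil.getVersion("2.1.1"));          // V1_0 (2.0-2.2 use the 1.x-compatible path)
        System.out.println(HiveVersionUtil.getVersion("2.3.7"));          // V2_3
        System.out.println(HiveVersionUtil.getVersion("3.1.2"));          // V2_3 (3.x currently shares the 2.3 path)
        System.out.println(HiveVersionUtil.getVersion(null));             // V2_3 (DEFAULT_HIVE_VERSION)
        System.out.println(HiveVersionUtil.getVersion("not-a-version"));  // V2_3, after logging a warning
    }
}
```
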
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/PooledHiveMetaStoreClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/PooledHiveMetaStoreClient.java
similarity index 98%
rename from fe/fe-core/src/main/java/org/apache/doris/datasource/PooledHiveMetaStoreClient.java
rename to fe/fe-core/src/main/java/org/apache/doris/datasource/hive/PooledHiveMetaStoreClient.java
index 008253c450..abc407cbef 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/PooledHiveMetaStoreClient.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/PooledHiveMetaStoreClient.java
@@ -15,10 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package org.apache.doris.datasource;
+package org.apache.doris.datasource.hive;
 
 import org.apache.doris.catalog.HMSResource;
 import org.apache.doris.common.Config;
+import org.apache.doris.datasource.HMSClientException;
 import org.apache.doris.datasource.hive.event.MetastoreNotificationFetchException;
 
 import com.aliyun.datalake.metastore.hive2.ProxyMetaStoreClient;
@@ -26,7 +27,6 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
diff --git a/fe/pom.xml b/fe/pom.xml
index 24d00e8744..19c71ffdfd 100644
--- a/fe/pom.xml
+++ b/fe/pom.xml
@@ -926,7 +926,7 @@ under the License.
                 <artifactId>mariadb-java-client</artifactId>
                 <version>${mariadb-java-client.version}</version>
             </dependency>
-             <dependency>
+            <dependency>
                 <groupId>com.aliyun.datalake</groupId>
                 <artifactId>metastore-client-hive2</artifactId>
                 <version>${dlf-metastore-client-hive2.version}</version>




[doris] 04/06: [fix](multi-catalog) fix bug that replay init catalog may happen after catalog is dropped (#15919)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 6bde53b102195bdcfcaf6c648f499a56c85331f4
Author: Mingyu Chen <mo...@163.com>
AuthorDate: Sat Jan 14 09:41:37 2023 +0800

    [fix](multi-catalog) fix bug that replay init catalog may happen after catalog is dropped (#15919)
---
 build.sh                                               |  1 +
 .../java/org/apache/doris/datasource/CatalogMgr.java   | 18 ++++++++++++++----
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/build.sh b/build.sh
index 9ecf89dfca..130018a18b 100755
--- a/build.sh
+++ b/build.sh
@@ -145,6 +145,7 @@ if [[ "$#" == 1 ]]; then
     BUILD_META_TOOL='OFF'
     BUILD_SPARK_DPP=1
     BUILD_HIVE_UDF=1
+    BUILD_JAVA_UDF=1
     CLEAN=0
 else
     while true; do
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
index e6cf9a4ee1..17b71aefaa 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
@@ -49,7 +49,6 @@ import org.apache.doris.persist.gson.GsonUtils;
 import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.qe.ShowResultSet;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.gson.annotations.SerializedName;
@@ -515,17 +514,27 @@ public class CatalogMgr implements Writable, GsonPostProcessable {
         }
     }
 
+    // Replay of init catalog / init db can happen at any time,
+    // even after the catalog or db has been dropped,
+    // because the init edit log may have been written while the object still existed.
+    // So just skip the edit log if the object no longer exists.
     public void replayInitCatalog(InitCatalogLog log) {
         ExternalCatalog catalog = (ExternalCatalog) idToCatalog.get(log.getCatalogId());
-        Preconditions.checkArgument(catalog != null);
+        if (catalog == null) {
+            return;
+        }
         catalog.replayInitCatalog(log);
     }
 
     public void replayInitExternalDb(InitDatabaseLog log) {
         ExternalCatalog catalog = (ExternalCatalog) idToCatalog.get(log.getCatalogId());
-        Preconditions.checkArgument(catalog != null);
+        if (catalog == null) {
+            return;
+        }
         ExternalDatabase db = catalog.getDbForReplay(log.getDbId());
-        Preconditions.checkArgument(db != null);
+        if (db == null) {
+            return;
+        }
         db.replayInitDb(log, catalog);
     }
 
@@ -598,3 +607,4 @@ public class CatalogMgr implements Writable, GsonPostProcessable {
         internalCatalog = (InternalCatalog) idToCatalog.get(InternalCatalog.INTERNAL_DS_ID);
     }
 }
+
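
The fix above replaces the Preconditions.checkArgument calls with null checks so that a replayed init log is silently skipped when its catalog or database has since been dropped; the same early-return guard is applied twice in replayInitExternalDb, once for the catalog and once for the database. Below is a self-contained sketch of that guard pattern; CatalogStub and InitLog are hypothetical stand-ins for ExternalCatalog and InitCatalogLog.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Minimal sketch of the "skip replay if the object is gone" pattern used above.
public class ReplayGuardDemo {
    static class InitLog {
        final long catalogId;
        InitLog(long catalogId) {
            this.catalogId = catalogId;
        }
    }

    static class CatalogStub {
        void replayInit(InitLog log) {
            System.out.println("replayed init for catalog " + log.catalogId);
        }
    }

    private final Map<Long, CatalogStub> idToCatalog = new ConcurrentHashMap<>();

    void replayInitCatalog(InitLog log) {
        CatalogStub catalog = idToCatalog.get(log.catalogId);
        if (catalog == null) {
            // The catalog was dropped after the init log was written; just skip it.
            return;
        }
        catalog.replayInit(log);
    }

    public static void main(String[] args) {
        ReplayGuardDemo mgr = new ReplayGuardDemo();
        mgr.idToCatalog.put(1L, new CatalogStub());
        mgr.replayInitCatalog(new InitLog(1L)); // replays normally
        mgr.replayInitCatalog(new InitLog(2L)); // dropped (or never existed): silently skipped
    }
}
```
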




[doris] 02/06: [feature](multi-catalog) support clickhouse jdbc catalog (#15780)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git

commit f2ec5dde8cbcac3ebf8041b40574fbe6e3be1f07
Author: yongkang.zhong <zh...@qq.com>
AuthorDate: Fri Jan 13 10:07:22 2023 +0800

    [feature](multi-catalog) support clickhouse jdbc catalog (#15780)
---
 be/src/vec/exec/vjdbc_connector.cpp                |  7 +-
 .../docs/ecosystem/external-table/multi-catalog.md | 51 ++++++++++++--
 .../Create/CREATE-CATALOG.md                       | 24 ++++++-
 .../docs/ecosystem/external-table/multi-catalog.md | 51 ++++++++++++--
 .../Create/CREATE-CATALOG.md                       | 24 ++++++-
 .../org/apache/doris/catalog/JdbcResource.java     |  5 +-
 .../org/apache/doris/external/jdbc/JdbcClient.java | 79 ++++++++++++++++++++++
 7 files changed, 225 insertions(+), 16 deletions(-)

diff --git a/be/src/vec/exec/vjdbc_connector.cpp b/be/src/vec/exec/vjdbc_connector.cpp
index b8898c0b2b..090cb6e0b6 100644
--- a/be/src/vec/exec/vjdbc_connector.cpp
+++ b/be/src/vec/exec/vjdbc_connector.cpp
@@ -235,7 +235,8 @@ Status JdbcConnector::_check_type(SlotDescriptor* slot_desc, const std::string&
             type_str, slot_desc->type().debug_string(), slot_desc->col_name());
     switch (slot_desc->type().type) {
     case TYPE_BOOLEAN: {
-        if (type_str != "java.lang.Boolean" && type_str != "java.math.BigDecimal") {
+        if (type_str != "java.lang.Boolean" && type_str != "java.math.BigDecimal" &&
+            type_str != "java.lang.Byte") {
             return Status::InternalError(error_msg);
         }
         break;
@@ -244,7 +245,7 @@ Status JdbcConnector::_check_type(SlotDescriptor* slot_desc, const std::string&
     case TYPE_SMALLINT:
     case TYPE_INT: {
         if (type_str != "java.lang.Short" && type_str != "java.lang.Integer" &&
-            type_str != "java.math.BigDecimal") {
+            type_str != "java.math.BigDecimal" && type_str != "java.lang.Byte") {
             return Status::InternalError(error_msg);
         }
         break;
@@ -281,7 +282,7 @@ Status JdbcConnector::_check_type(SlotDescriptor* slot_desc, const std::string&
     case TYPE_DATETIME:
     case TYPE_DATETIMEV2: {
         if (type_str != "java.sql.Timestamp" && type_str != "java.time.LocalDateTime" &&
-            type_str != "java.sql.Date") {
+            type_str != "java.sql.Date" && type_str != "java.time.LocalDate") {
             return Status::InternalError(error_msg);
         }
         break;
diff --git a/docs/en/docs/ecosystem/external-table/multi-catalog.md b/docs/en/docs/ecosystem/external-table/multi-catalog.md
index 41f94ad4b0..0d0439ec2b 100644
--- a/docs/en/docs/ecosystem/external-table/multi-catalog.md
+++ b/docs/en/docs/ecosystem/external-table/multi-catalog.md
@@ -381,7 +381,7 @@ Parameter | Description
 ### Connect JDBC
 
 
-The following example creates a Catalog connection named jdbc. This jdbc Catalog will connect to the specified database according to the 'jdbc.jdbc_url' parameter(`jdbc::mysql` in the example, so connect to the mysql database). Currently, only the MYSQL database type is supported.
+The following example creates a Catalog named jdbc. This JDBC Catalog connects to the database specified by the 'jdbc.jdbc_url' parameter (`jdbc::mysql` in the example, so it connects to a MySQL database). Currently, the MYSQL, POSTGRESQL, and CLICKHOUSE database types are supported.
 
 **mysql catalog example**
 
@@ -391,8 +391,8 @@ CREATE RESOURCE mysql_resource PROPERTIES (
     "type"="jdbc",
     "user"="root",
     "password"="123456",
-    "jdbc_url" = "jdbc:mysql://127.0.0.1:13396/demo",
-    "driver_url" = "file:/path/to/mysql-connector-java-5.1.47.jar",
+    "jdbc_url" = "jdbc:mysql://127.0.0.1:3306/demo",
+    "driver_url" = "file:///path/to/mysql-connector-java-5.1.47.jar",
     "driver_class" = "com.mysql.jdbc.Driver"
 )
 CREATE CATALOG jdbc WITH RESOURCE mysql_resource;
@@ -400,7 +400,7 @@ CREATE CATALOG jdbc WITH RESOURCE mysql_resource;
 -- 1.2.0 Version
 CREATE CATALOG jdbc PROPERTIES (
     "type"="jdbc",
-    "jdbc.jdbc_url" = "jdbc:mysql://127.0.0.1:13396/demo",
+    "jdbc.jdbc_url" = "jdbc:mysql://127.0.0.1:3306/demo",
     ...
 )
 ```
@@ -414,7 +414,7 @@ CREATE RESOURCE pg_resource PROPERTIES (
     "user"="postgres",
     "password"="123456",
     "jdbc_url" = "jdbc:postgresql://127.0.0.1:5449/demo",
-    "driver_url" = "file:/path/to/postgresql-42.5.1.jar",
+    "driver_url" = "file:///path/to/postgresql-42.5.1.jar",
     "driver_class" = "org.postgresql.Driver"
 );
 CREATE CATALOG jdbc WITH RESOURCE pg_resource;
@@ -427,6 +427,28 @@ CREATE CATALOG jdbc PROPERTIES (
 )
 ```
 
+**CLICKHOUSE catalog example**
+
+```sql
+-- 1.2.0+ Version
+CREATE RESOURCE clickhouse_resource PROPERTIES (
+    "type"="jdbc",
+    "user"="default",
+    "password"="123456",
+    "jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
+    "driver_url" = "file:///path/to/clickhouse-jdbc-0.3.2-patch11-all.jar",
+    "driver_class" = "com.clickhouse.jdbc.ClickHouseDriver"
+)
+CREATE CATALOG jdbc WITH RESOURCE clickhouse_resource;
+
+-- 1.2.0 Version
+CREATE CATALOG jdbc PROPERTIES (
+    "type"="jdbc",
+    "jdbc.jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
+    ...
+)
+```
+
 Where `jdbc.driver_url` can be a remote jar package
 
 ```sql
@@ -723,6 +745,25 @@ For Hive/Iceberge/Hudi
 | bit/bit(n)/bit varying(n) | STRING | `bit` type corresponds to the `STRING` type of DORIS. The data read is `true/false`, not `1/0` |
 | uuid/josnb | STRING | |
 
+#### CLICKHOUSE
+
+| ClickHouse Type        | Doris Type | Comment                                                                                                                              |
+|------------------------|------------|--------------------------------------------------------------------------------------------------------------------------------------|
+| Bool                   | BOOLEAN    |                                                                                                                                      |
+| String                 | STRING     |                                                                                                                                      |
+| Date/Date32            | DATE       |                                                                                                                                      |
+| DateTime/DateTime64    | DATETIME   | Data that exceeds Doris's maximum DateTime accuracy is truncated                                                                     |
+| Float32                | FLOAT      |                                                                                                                                      |
+| Float64                | DOUBLE     |                                                                                                                                      |
+| Int8                   | TINYINT    |                                                                                                                                      |
+| Int16/UInt8            | SMALLINT   | Doris has no UNSIGNED types, so the next wider type is used                                                                           |
+| Int32/UInt16           | INT        | Doris has no UNSIGNED types, so the next wider type is used                                                                           |
+| Int64/UInt32           | BIGINT     | Doris has no UNSIGNED types, so the next wider type is used                                                                           |
+| Int128/UInt64          | LARGEINT   | Doris has no UNSIGNED types, so the next wider type is used                                                                           |
+| Int256/UInt128/UInt256 | STRING     | Doris has no type of this magnitude, so STRING is used                                                                                |
+| DECIMAL                | DECIMAL    | Values exceeding Doris's maximum Decimal precision are mapped to STRING                                                               |
+| Enum/IPv4/IPv6/UUID    | STRING     | IPv4 and IPv6 values are displayed with a leading `/`, which can be stripped with the `split_part` function                           |
+
 ## Privilege Management
 
 Using Doris to access the databases and tables in the External Catalog is not controlled by the permissions of the external data source itself, but relies on Doris's own permission access management.
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
index b6d76b94b1..33e9e4af9a 100644
--- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
+++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
@@ -168,7 +168,29 @@ CREATE CATALOG catalog_name PROPERTIES (
 		"jdbc.driver_url" = "file:/path/to/postgresql-42.5.1.jar",
 		"jdbc.driver_class" = "org.postgresql.Driver"
 	);
-	```	
+	```
+
+	**clickhouse**
+
+	```sql
+	-- 1.2.0+ Version
+	CREATE RESOURCE clickhouse_resource PROPERTIES (
+		"type"="jdbc",
+		"user"="default",
+		"password"="123456",
+		"jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
+		"driver_url" = "file:///path/to/clickhouse-jdbc-0.3.2-patch11-all.jar",
+		"driver_class" = "com.clickhouse.jdbc.ClickHouseDriver"
+	)
+	CREATE CATALOG jdbc WITH RESOURCE clickhouse_resource;
+	
+	-- 1.2.0 Version
+	CREATE CATALOG jdbc PROPERTIES (
+		"type"="jdbc",
+		"jdbc.jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
+		...
+	)
+	```
 
 ### Keywords
 
diff --git a/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md b/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
index 58f7bfe578..8192eea436 100644
--- a/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
+++ b/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
@@ -454,7 +454,7 @@ mysql> select * from test;
 ### 连接JDBC
 
 以下示例,用于创建一个名为 jdbc 的 Catalog, 通过jdbc 连接指定的Mysql。
-jdbc Catalog会根据`jdbc.jdbc_url` 来连接指定的数据库(示例中是`jdbc::mysql`, 所以连接MYSQL数据库),当前支持MYSQL、POSTGRESQL数据库类型。
+jdbc Catalog会根据`jdbc.jdbc_url` 来连接指定的数据库(示例中是`jdbc::mysql`, 所以连接MYSQL数据库),当前支持MYSQL、POSTGRESQL、CLICKHOUSE数据库类型。
 
 **MYSQL catalog示例**
 
@@ -464,8 +464,8 @@ CREATE RESOURCE mysql_resource PROPERTIES (
     "type"="jdbc",
     "user"="root",
     "password"="123456",
-    "jdbc_url" = "jdbc:mysql://127.0.0.1:13396/demo",
-    "driver_url" = "file:/path/to/mysql-connector-java-5.1.47.jar",
+    "jdbc_url" = "jdbc:mysql://127.0.0.1:3306/demo",
+    "driver_url" = "file:///path/to/mysql-connector-java-5.1.47.jar",
     "driver_class" = "com.mysql.jdbc.Driver"
 )
 CREATE CATALOG jdbc WITH RESOURCE mysql_resource;
@@ -473,7 +473,7 @@ CREATE CATALOG jdbc WITH RESOURCE mysql_resource;
 -- 1.2.0 版本
 CREATE CATALOG jdbc PROPERTIES (
     "type"="jdbc",
-    "jdbc.jdbc_url" = "jdbc:mysql://127.0.0.1:13396/demo",
+    "jdbc.jdbc_url" = "jdbc:mysql://127.0.0.1:3306/demo",
     ...
 )
 ```
@@ -487,7 +487,7 @@ CREATE RESOURCE pg_resource PROPERTIES (
     "user"="postgres",
     "password"="123456",
     "jdbc_url" = "jdbc:postgresql://127.0.0.1:5449/demo",
-    "driver_url" = "file:/path/to/postgresql-42.5.1.jar",
+    "driver_url" = "file:///path/to/postgresql-42.5.1.jar",
     "driver_class" = "org.postgresql.Driver"
 );
 CREATE CATALOG jdbc WITH RESOURCE pg_resource;
@@ -500,6 +500,28 @@ CREATE CATALOG jdbc PROPERTIES (
 )
 ```
 
+**CLICKHOUSE catalog示例**
+
+```sql
+-- 1.2.0+ 版本
+CREATE RESOURCE clickhouse_resource PROPERTIES (
+    "type"="jdbc",
+    "user"="default",
+    "password"="123456",
+    "jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
+    "driver_url" = "file:///path/to/clickhouse-jdbc-0.3.2-patch11-all.jar",
+    "driver_class" = "com.clickhouse.jdbc.ClickHouseDriver"
+)
+CREATE CATALOG jdbc WITH RESOURCE clickhouse_resource;
+
+-- 1.2.0 版本
+CREATE CATALOG jdbc PROPERTIES (
+    "type"="jdbc",
+    "jdbc.jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
+    ...
+)
+```
+
 其中`jdbc.driver_url`可以是远程jar包:
 
 ```sql
@@ -722,6 +744,25 @@ select k1, k4 from table;           // Query OK.
 | bit/bit(n)/bit varying(n) | STRING | `bit`类型映射为doris的`STRING`类型,读出的数据是`true/false`, 而不是`1/0` |
 | uuid/josnb | STRING | |
 
+#### CLICKHOUSE
+
+| ClickHouse Type        | Doris Type | Comment                                             |
+|------------------------|------------|-----------------------------------------------------|
+| Bool                   | BOOLEAN    |                                                     |
+| String                 | STRING     |                                                     |
+| Date/Date32            | DATE       |                                                     |
+| DateTime/DateTime64    | DATETIME   | 对于超过了Doris最大的DateTime精度的数据,将截断处理                    |
+| Float32                | FLOAT      |                                                     |
+| Float64                | DOUBLE     |                                                     |
+| Int8                   | TINYINT    |                                                     |
+| Int16/UInt8            | SMALLINT   | Doris没有UNSIGNED数据类型,所以扩大一个数量级                       |
+| Int32/UInt16           | INT        | Doris没有UNSIGNED数据类型,所以扩大一个数量级                       |
+| Int64/Uint32           | BIGINT     | Doris没有UNSIGNED数据类型,所以扩大一个数量级                       |
+| Int128/UInt64          | LARGEINT   | Doris没有UNSIGNED数据类型,所以扩大一个数量级                       |
+| Int256/UInt128/UInt256 | STRING     | Doris没有这个数量级的数据类型,采用STRING处理                        |
+| DECIMAL                | DECIMAL    | 对于超过了Doris最大的Decimal精度的数据,将映射为STRING                |
+| Enum/IPv4/IPv6/UUID    | STRING     | 在显示上IPv4,IPv6会额外在数据最前面显示一个`/`,需要自己用`split_part`函数处理 |
+
 ## 权限管理
 
 使用 Doris 对 External Catalog 中库表进行访问,并不受外部数据目录自身的权限控制,而是依赖 Doris 自身的权限访问管理功能。
diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
index bbdf6491e1..81ee7b30cb 100644
--- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
+++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md
@@ -174,7 +174,29 @@ CREATE CATALOG catalog_name PROPERTIES (
 		"jdbc.driver_url" = "file:/path/to/postgresql-42.5.1.jar",
 		"jdbc.driver_class" = "org.postgresql.Driver"
 	);
-	```	
+	```
+ 
+   **clickhouse**
+
+   ```sql
+   -- 1.2.0+ Version
+   CREATE RESOURCE clickhouse_resource PROPERTIES (
+       "type"="jdbc",
+       "user"="default",
+       "password"="123456",
+       "jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
+       "driver_url" = "file:///path/to/clickhouse-jdbc-0.3.2-patch11-all.jar",
+       "driver_class" = "com.clickhouse.jdbc.ClickHouseDriver"
+   )
+   CREATE CATALOG jdbc WITH RESOURCE clickhouse_resource;
+   
+   -- 1.2.0 Version
+   CREATE CATALOG jdbc PROPERTIES (
+       "type"="jdbc",
+       "jdbc.jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo",
+       ...
+   )
+   ```
 
 ### Keywords
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
index 8a4e45968b..3862a38e3b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
@@ -65,11 +65,12 @@ public class JdbcResource extends Resource {
     public static final String JDBC_POSTGRESQL = "jdbc:postgresql";
     public static final String JDBC_ORACLE = "jdbc:oracle";
     public static final String JDBC_SQLSERVER = "jdbc:sqlserver";
-
+    public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse";
     public static final String MYSQL = "MYSQL";
     public static final String POSTGRESQL = "POSTGRESQL";
     public static final String ORACLE = "ORACLE";
     private static final String SQLSERVER = "SQLSERVER";
+    public static final String CLICKHOUSE = "CLICKHOUSE";
 
     public static final String JDBC_PROPERTIES_PREFIX = "jdbc.";
     public static final String JDBC_URL = "jdbc_url";
@@ -234,6 +235,8 @@ public class JdbcResource extends Resource {
             return ORACLE;
         } else if (url.startsWith(JDBC_SQLSERVER)) {
             return SQLSERVER;
+        } else if (url.startsWith(JDBC_CLICKHOUSE)) {
+            return CLICKHOUSE;
         }
         throw new DdlException("Unsupported jdbc database type, please check jdbcUrl: " + url);
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/jdbc/JdbcClient.java b/fe/fe-core/src/main/java/org/apache/doris/external/jdbc/JdbcClient.java
index 11ece6f620..fa10819a7b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/external/jdbc/JdbcClient.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/external/jdbc/JdbcClient.java
@@ -163,6 +163,9 @@ public class JdbcClient {
                     rs = stmt.executeQuery("SELECT schema_name FROM information_schema.schemata "
                             + "where schema_owner='" + jdbcUser + "';");
                     break;
+                case JdbcResource.CLICKHOUSE:
+                    rs = stmt.executeQuery("SHOW DATABASES");
+                    break;
                 default:
                     throw  new JdbcClientException("Not supported jdbc type");
             }
@@ -195,6 +198,9 @@ public class JdbcClient {
                 case JdbcResource.POSTGRESQL:
                     rs = databaseMetaData.getTables(null, dbName, null, types);
                     break;
+                case JdbcResource.CLICKHOUSE:
+                    rs = databaseMetaData.getTables(null, dbName, null, types);
+                    break;
                 default:
                     throw new JdbcClientException("Unknown database type");
             }
@@ -222,6 +228,9 @@ public class JdbcClient {
                 case JdbcResource.POSTGRESQL:
                     rs = databaseMetaData.getTables(null, dbName, null, types);
                     break;
+                case JdbcResource.CLICKHOUSE:
+                    rs = databaseMetaData.getTables(null, dbName, null, types);
+                    break;
                 default:
                     throw new JdbcClientException("Unknown database type: " + dbType);
             }
@@ -290,6 +299,9 @@ public class JdbcClient {
                 case JdbcResource.POSTGRESQL:
                     rs = databaseMetaData.getColumns(null, dbName, tableName, null);
                     break;
+                case JdbcResource.CLICKHOUSE:
+                    rs = databaseMetaData.getColumns(null, dbName, tableName, null);
+                    break;
                 default:
                     throw new JdbcClientException("Unknown database type");
             }
@@ -320,6 +332,8 @@ public class JdbcClient {
                 return mysqlTypeToDoris(fieldSchema);
             case JdbcResource.POSTGRESQL:
                 return postgresqlTypeToDoris(fieldSchema);
+            case JdbcResource.CLICKHOUSE:
+                return clickhouseTypeToDoris(fieldSchema);
             default:
                 throw new JdbcClientException("Unknown database type");
         }
@@ -486,6 +500,69 @@ public class JdbcClient {
         }
     }
 
+    public Type clickhouseTypeToDoris(JdbcFieldSchema fieldSchema) {
+        String ckType = fieldSchema.getDataTypeName();
+        if (ckType.startsWith("LowCardinality")) {
+            ckType = ckType.substring(15, ckType.length() - 1);
+            if (ckType.startsWith("Nullable")) {
+                ckType = ckType.substring(9, ckType.length() - 1);
+            }
+        } else if (ckType.startsWith("Nullable")) {
+            ckType = ckType.substring(9, ckType.length() - 1);
+        }
+        if (ckType.startsWith("Decimal")) {
+            String[] accuracy = ckType.substring(8, ckType.length() - 1).split(", ");
+            int precision = Integer.parseInt(accuracy[0]);
+            int scale = Integer.parseInt(accuracy[1]);
+            if (precision <= ScalarType.MAX_DECIMAL128_PRECISION) {
+                if (!Config.enable_decimal_conversion && precision > ScalarType.MAX_DECIMALV2_PRECISION) {
+                    return ScalarType.createStringType();
+                }
+                return ScalarType.createDecimalType(precision, scale);
+            } else {
+                return ScalarType.createStringType();
+            }
+        } else if ("String".contains(ckType) || ckType.startsWith("Enum")
+                || ckType.startsWith("IPv") || "UUID".contains(ckType)
+                || ckType.startsWith("FixedString")) {
+            return ScalarType.createStringType();
+        } else if (ckType.startsWith("DateTime")) {
+            return ScalarType.getDefaultDateType(Type.DATETIME);
+        }
+        switch (ckType) {
+            case "Bool":
+                return Type.BOOLEAN;
+            case "Int8":
+                return Type.TINYINT;
+            case "Int16":
+            case "UInt8":
+                return Type.SMALLINT;
+            case "Int32":
+            case "UInt16":
+                return Type.INT;
+            case "Int64":
+            case "UInt32":
+                return Type.BIGINT;
+            case "Int128":
+            case "UInt64":
+                return Type.LARGEINT;
+            case "Int256":
+            case "UInt128":
+            case "UInt256":
+                return ScalarType.createStringType();
+            case "Float32":
+                return Type.FLOAT;
+            case "Float64":
+                return Type.DOUBLE;
+            case "Date":
+            case "Date32":
+                return ScalarType.getDefaultDateType(Type.DATE);
+            default:
+                return Type.UNSUPPORTED;
+        }
+        // TODO(zyk): support the ClickHouse array type once the JDBC external table supports arrays
+    }
+
     public List<Column> getColumnsFromJdbc(String dbName, String tableName) {
         List<JdbcFieldSchema> jdbcTableSchema = getJdbcColumnsInfo(dbName, tableName);
         List<Column> dorisTableSchema = Lists.newArrayListWithCapacity(jdbcTableSchema.size());
@@ -505,6 +582,8 @@ public class JdbcClient {
             return JdbcResource.POSTGRESQL;
         } else if (url.startsWith(JdbcResource.JDBC_ORACLE)) {
             return JdbcResource.ORACLE;
+        } else if (url.startsWith(JdbcResource.JDBC_CLICKHOUSE)) {
+            return JdbcResource.CLICKHOUSE;
         }
         // else if (url.startsWith("jdbc:sqlserver")) {
         //     return SQLSERVER;
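
clickhouseTypeToDoris() above maps types by parsing the type name reported by the driver, first peeling the LowCardinality(...) and Nullable(...) wrappers and only then dispatching on the inner name (Decimal, DateTime, Int*, and so on). The standalone sketch below reproduces just the unwrapping step, with an extra endsWith check added for safety; it is an illustration of the approach, not the committed method.

```java
public class ClickHouseTypeNameDemo {
    // Peel LowCardinality(...) and Nullable(...) before looking at the inner type name,
    // mirroring the first steps of clickhouseTypeToDoris above.
    static String unwrap(String ckType) {
        if (ckType.startsWith("LowCardinality(") && ckType.endsWith(")")) {
            ckType = ckType.substring("LowCardinality(".length(), ckType.length() - 1);
        }
        if (ckType.startsWith("Nullable(") && ckType.endsWith(")")) {
            ckType = ckType.substring("Nullable(".length(), ckType.length() - 1);
        }
        return ckType;
    }

    public static void main(String[] args) {
        System.out.println(unwrap("LowCardinality(Nullable(String))")); // String
        System.out.println(unwrap("Nullable(Decimal(27, 9))"));         // Decimal(27, 9)
        System.out.println(unwrap("UInt64"));                           // UInt64
    }
}
```
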




[doris] 06/06: [Enhencement](jdbc scanner) add profile for jdbc scanner (#15914)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 5142f30e803919773975959c4d5077df08f1d911
Author: Tiewei Fang <43...@users.noreply.github.com>
AuthorDate: Sat Jan 14 10:28:59 2023 +0800

    [Enhencement](jdbc scanner) add profile for jdbc scanner (#15914)
---
 be/src/olap/schema_change.cpp             |  2 +-
 be/src/vec/exec/scan/new_jdbc_scanner.cpp | 13 ++++++++++--
 be/src/vec/exec/scan/new_jdbc_scanner.h   | 10 +++++++++
 be/src/vec/exec/scan/vscan_node.h         |  1 +
 be/src/vec/exec/vjdbc_connector.cpp       | 34 +++++++++++++++++++++++--------
 be/src/vec/exec/vjdbc_connector.h         |  6 ++++++
 6 files changed, 55 insertions(+), 11 deletions(-)

diff --git a/be/src/olap/schema_change.cpp b/be/src/olap/schema_change.cpp
index 55eb3e8203..232b85bd8e 100644
--- a/be/src/olap/schema_change.cpp
+++ b/be/src/olap/schema_change.cpp
@@ -2185,7 +2185,7 @@ Status SchemaChangeHandler::_convert_historical_rowsets(const SchemaChangeParams
     }
 
     if (!sc_sorting && !sc_directly && sc_params.alter_tablet_type == AlterTabletType::ROLLUP) {
-        res = Status::Error<SCHEMA_SCHEMA_INVALID>(
+        res = Status::InternalError(
                 "Don't support to add materialized view by linked schema change");
         return process_alter_exit();
     }
diff --git a/be/src/vec/exec/scan/new_jdbc_scanner.cpp b/be/src/vec/exec/scan/new_jdbc_scanner.cpp
index 377ae6c800..80fc3669c8 100644
--- a/be/src/vec/exec/scan/new_jdbc_scanner.cpp
+++ b/be/src/vec/exec/scan/new_jdbc_scanner.cpp
@@ -17,6 +17,8 @@
 
 #include "new_jdbc_scanner.h"
 
+#include "util/runtime_profile.h"
+
 namespace doris::vectorized {
 NewJdbcScanner::NewJdbcScanner(RuntimeState* state, NewJdbcScanNode* parent, int64_t limit,
                                const TupleId& tuple_id, const std::string& query_string,
@@ -27,7 +29,14 @@ NewJdbcScanner::NewJdbcScanner(RuntimeState* state, NewJdbcScanNode* parent, int
           _tuple_id(tuple_id),
           _query_string(query_string),
           _tuple_desc(nullptr),
-          _table_type(table_type) {}
+          _table_type(table_type) {
+    _load_jar_timer = ADD_TIMER(get_parent()->_scanner_profile, "LoadJarTime");
+    _init_connector_timer = ADD_TIMER(get_parent()->_scanner_profile, "InitConnectorTime");
+    _check_type_timer = ADD_TIMER(get_parent()->_scanner_profile, "CheckTypeTime");
+    _get_data_timer = ADD_TIMER(get_parent()->_scanner_profile, "GetDataTime");
+    _execte_read_timer = ADD_TIMER(get_parent()->_scanner_profile, "ExecteReadTime");
+    _connector_close_timer = ADD_TIMER(get_parent()->_scanner_profile, "ConnectorCloseTime");
+}
 
 Status NewJdbcScanner::prepare(RuntimeState* state, VExprContext** vconjunct_ctx_ptr) {
     VLOG_CRITICAL << "NewJdbcScanner::Prepare";
@@ -67,7 +76,7 @@ Status NewJdbcScanner::prepare(RuntimeState* state, VExprContext** vconjunct_ctx
     _jdbc_param.query_string = std::move(_query_string);
     _jdbc_param.table_type = _table_type;
 
-    _jdbc_connector.reset(new (std::nothrow) JdbcConnector(_jdbc_param));
+    _jdbc_connector.reset(new (std::nothrow) JdbcConnector(this, _jdbc_param));
     if (_jdbc_connector == nullptr) {
         return Status::InternalError("new a jdbc scanner failed.");
     }
diff --git a/be/src/vec/exec/scan/new_jdbc_scanner.h b/be/src/vec/exec/scan/new_jdbc_scanner.h
index e88b33d252..4f869d0f41 100644
--- a/be/src/vec/exec/scan/new_jdbc_scanner.h
+++ b/be/src/vec/exec/scan/new_jdbc_scanner.h
@@ -18,6 +18,7 @@
 #pragma once
 
 #include "runtime/runtime_state.h"
+#include "util/runtime_profile.h"
 #include "vec/exec/scan/new_jdbc_scan_node.h"
 #include "vec/exec/scan/vscanner.h"
 #include "vec/exec/vjdbc_connector.h"
@@ -25,6 +26,8 @@ namespace doris {
 namespace vectorized {
 class NewJdbcScanner : public VScanner {
 public:
+    friend class JdbcConnector;
+
     NewJdbcScanner(RuntimeState* state, NewJdbcScanNode* parent, int64_t limit,
                    const TupleId& tuple_id, const std::string& query_string,
                    TOdbcTableType::type table_type, RuntimeProfile* profile);
@@ -37,6 +40,13 @@ public:
 protected:
     Status _get_block_impl(RuntimeState* state, Block* block, bool* eos) override;
 
+    RuntimeProfile::Counter* _load_jar_timer = nullptr;
+    RuntimeProfile::Counter* _init_connector_timer = nullptr;
+    RuntimeProfile::Counter* _get_data_timer = nullptr;
+    RuntimeProfile::Counter* _check_type_timer = nullptr;
+    RuntimeProfile::Counter* _execte_read_timer = nullptr;
+    RuntimeProfile::Counter* _connector_close_timer = nullptr;
+
 private:
     bool _is_init;
 
diff --git a/be/src/vec/exec/scan/vscan_node.h b/be/src/vec/exec/scan/vscan_node.h
index 8abd4b5e19..251c5dd50f 100644
--- a/be/src/vec/exec/scan/vscan_node.h
+++ b/be/src/vec/exec/scan/vscan_node.h
@@ -48,6 +48,7 @@ public:
     friend class VScanner;
     friend class NewOlapScanner;
     friend class VFileScanner;
+    friend class NewJdbcScanner;
     friend class ScannerContext;
 
     Status init(const TPlanNode& tnode, RuntimeState* state = nullptr) override;
diff --git a/be/src/vec/exec/vjdbc_connector.cpp b/be/src/vec/exec/vjdbc_connector.cpp
index 090cb6e0b6..b7c2b388be 100644
--- a/be/src/vec/exec/vjdbc_connector.cpp
+++ b/be/src/vec/exec/vjdbc_connector.cpp
@@ -25,10 +25,12 @@
 #include "runtime/define_primitive_type.h"
 #include "runtime/user_function_cache.h"
 #include "util/jni-util.h"
+#include "util/runtime_profile.h"
 #include "vec/columns/column_array.h"
 #include "vec/columns/column_nullable.h"
 #include "vec/data_types/data_type_factory.hpp"
 #include "vec/data_types/data_type_string.h"
+#include "vec/exec/scan/new_jdbc_scanner.h"
 #include "vec/functions/simple_function_factory.h"
 
 namespace doris {
@@ -51,6 +53,12 @@ JdbcConnector::JdbcConnector(const JdbcConnectorParam& param)
           _conn_param(param),
           _closed(false) {}
 
+JdbcConnector::JdbcConnector(NewJdbcScanner* jdbc_scanner, const JdbcConnectorParam& param)
+        : TableConnector(param.tuple_desc, param.query_string),
+          _jdbc_scanner(jdbc_scanner),
+          _conn_param(param),
+          _closed(false) {}
+
 JdbcConnector::~JdbcConnector() {
     if (!_closed) {
         close();
@@ -63,6 +71,7 @@ JdbcConnector::~JdbcConnector() {
 #define DELETE_BASIC_JAVA_CLAZZ_REF(CPP_TYPE) env->DeleteGlobalRef(_executor_##CPP_TYPE##_clazz);
 
 Status JdbcConnector::close() {
+    SCOPED_TIMER(_jdbc_scanner->_connector_close_timer);
     _closed = true;
     if (!_is_open) {
         return Status::OK();
@@ -123,10 +132,12 @@ Status JdbcConnector::open(RuntimeState* state, bool read) {
         if (_conn_param.resource_name.empty()) {
             // for jdbcExternalTable, _conn_param.resource_name == ""
             // so, we use _conn_param.driver_path as key of jarpath
+            SCOPED_TIMER(_jdbc_scanner->_load_jar_timer);
             RETURN_IF_ERROR(function_cache->get_jarpath(
                     std::abs((int64_t)hash_str(_conn_param.driver_path)), _conn_param.driver_path,
                     _conn_param.driver_checksum, &local_location));
         } else {
+            SCOPED_TIMER(_jdbc_scanner->_load_jar_timer);
             RETURN_IF_ERROR(function_cache->get_jarpath(
                     std::abs((int64_t)hash_str(_conn_param.resource_name)), _conn_param.driver_path,
                     _conn_param.driver_checksum, &local_location));
@@ -146,8 +157,10 @@ Status JdbcConnector::open(RuntimeState* state, bool read) {
         // Pushed frame will be popped when jni_frame goes out-of-scope.
         RETURN_IF_ERROR(jni_frame.push(env));
         RETURN_IF_ERROR(SerializeThriftMsg(env, &ctor_params, &ctor_params_bytes));
-        _executor_obj = env->NewObject(_executor_clazz, _executor_ctor_id, ctor_params_bytes);
-
+        {
+            SCOPED_TIMER(_jdbc_scanner->_init_connector_timer);
+            _executor_obj = env->NewObject(_executor_clazz, _executor_ctor_id, ctor_params_bytes);
+        }
         jbyte* pBytes = env->GetByteArrayElements(ctor_params_bytes, nullptr);
         env->ReleaseByteArrayElements(ctor_params_bytes, pBytes, JNI_ABORT);
         env->DeleteLocalRef(ctor_params_bytes);
@@ -172,19 +185,23 @@ Status JdbcConnector::query() {
 
     JNIEnv* env = nullptr;
     RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env));
-    jint colunm_count =
-            env->CallNonvirtualIntMethod(_executor_obj, _executor_clazz, _executor_read_id);
-    RETURN_IF_ERROR(JniUtil::GetJniExceptionMsg(env));
-
-    if (colunm_count != materialize_num) {
-        return Status::InternalError("input and output column num not equal of jdbc query.");
+    {
+        SCOPED_TIMER(_jdbc_scanner->_execte_read_timer);
+        jint colunm_count =
+                env->CallNonvirtualIntMethod(_executor_obj, _executor_clazz, _executor_read_id);
+        RETURN_IF_ERROR(JniUtil::GetJniExceptionMsg(env));
+        if (colunm_count != materialize_num) {
+            return Status::InternalError("input and output column num not equal of jdbc query.");
+        }
     }
+
     LOG(INFO) << "JdbcConnector::query has exec success: " << _sql_str;
     RETURN_IF_ERROR(_check_column_type());
     return Status::OK();
 }
 
 Status JdbcConnector::_check_column_type() {
+    SCOPED_TIMER(_jdbc_scanner->_check_type_timer);
     JNIEnv* env = nullptr;
     RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env));
     jobject type_lists =
@@ -332,6 +349,7 @@ Status JdbcConnector::get_next(bool* eos, std::vector<MutableColumnPtr>& columns
     if (!_is_open) {
         return Status::InternalError("get_next before open of jdbc connector.");
     }
+    SCOPED_TIMER(_jdbc_scanner->_get_data_timer);
     JNIEnv* env = nullptr;
     RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env));
     jboolean has_next =
diff --git a/be/src/vec/exec/vjdbc_connector.h b/be/src/vec/exec/vjdbc_connector.h
index e1b61fefea..ee99be8ec5 100644
--- a/be/src/vec/exec/vjdbc_connector.h
+++ b/be/src/vec/exec/vjdbc_connector.h
@@ -28,6 +28,9 @@
 
 namespace doris {
 namespace vectorized {
+
+class NewJdbcScanner;
+
 struct JdbcConnectorParam {
     std::string driver_path;
     std::string driver_class;
@@ -46,6 +49,8 @@ class JdbcConnector : public TableConnector {
 public:
     JdbcConnector(const JdbcConnectorParam& param);
 
+    JdbcConnector(NewJdbcScanner* jdbc_scanner, const JdbcConnectorParam& param);
+
     ~JdbcConnector() override;
 
     Status open(RuntimeState* state, bool read = false) override;
@@ -84,6 +89,7 @@ private:
     Status _cast_string_to_array(const SlotDescriptor* slot_desc, Block* block, int column_index,
                                  int rows);
 
+    NewJdbcScanner* _jdbc_scanner;
     const JdbcConnectorParam& _conn_param;
     //java.sql.Types: https://docs.oracle.com/javase/7/docs/api/constant-values.html#java.sql.Types.INTEGER
     std::map<int, PrimitiveType> _arr_jdbc_map {
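
The BE change above wraps each phase of the JDBC scan (loading the driver jar, constructing the connector, executing the read, type checking, fetching blocks, closing) in RuntimeProfile counters via SCOPED_TIMER, so the time spent per phase shows up in the query profile. As a rough illustration of that scoped-timer idea only (this is not the BE RuntimeProfile API), a Java sketch using try-with-resources:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// A scope object that adds elapsed nanoseconds to a named counter when it closes,
// loosely analogous to SCOPED_TIMER in the C++ change above. Names are illustrative.
public class ScopedTimerDemo {
    static class Profile {
        private final Map<String, AtomicLong> counters = new ConcurrentHashMap<>();

        ScopedTimer scopedTimer(String name) {
            AtomicLong counter = counters.computeIfAbsent(name, k -> new AtomicLong());
            return new ScopedTimer(counter);
        }

        void dump() {
            counters.forEach((name, nanos) ->
                    System.out.println(name + ": " + nanos.get() / 1_000_000 + " ms"));
        }
    }

    static class ScopedTimer implements AutoCloseable {
        private final AtomicLong counter;
        private final long start = System.nanoTime();

        ScopedTimer(AtomicLong counter) {
            this.counter = counter;
        }

        @Override
        public void close() {
            counter.addAndGet(System.nanoTime() - start);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Profile profile = new Profile();
        try (ScopedTimer ignored = profile.scopedTimer("ExecuteReadTime")) {
            Thread.sleep(20); // stand-in for the read call
        }
        try (ScopedTimer ignored = profile.scopedTimer("GetDataTime")) {
            Thread.sleep(10); // stand-in for fetching the next block
        }
        profile.dump();
    }
}
```
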

