Posted to commits@hive.apache.org by se...@apache.org on 2018/06/15 20:10:33 UTC
[01/13] hive git commit: HIVE-19532: 03 patch
Repository: hive
Updated Branches:
refs/heads/master-txnstats [created] be3039587
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index bb69105..f92f13c 100644
--- a/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT
CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
@@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "
CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
@@ -106,7 +106,8 @@ CREATE TABLE "APP"."TAB_COL_STATS"(
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
"TBL_ID" BIGINT NOT NULL,
- "BIT_VECTOR" BLOB
+ "BIT_VECTOR" BLOB,
+ "TXN_ID" BIGINT DEFAULT 0
);
CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
@@ -155,7 +156,8 @@ CREATE TABLE "APP"."PART_COL_STATS"(
"NUM_FALSES" BIGINT,
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
- "PART_ID" BIGINT NOT NULL
+ "PART_ID" BIGINT NOT NULL,
+ "TXN_ID" BIGINT DEFAULT 0
);
CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
index a511376..94f8192 100644
--- a/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
@@ -1,5 +1,11 @@
-- Upgrade MetaStore schema from 3.1.0 to 4.0.0
-
+-- HIVE-19416
+ALTER TABLE "APP"."TBLS" ADD WRITEID_LIST CLOB;
+ALTER TABLE "APP"."TBLS" ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE "APP"."PARTITIONS" ADD WRITEID_LIST CLOB;
+ALTER TABLE "APP"."PARTITIONS" ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE "APP"."TAB_COL_STATS" ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE "APP"."PART_COL_STATS" ADD TXN_ID bigint DEFAULT 0;
-- This needs to be the last thing done. Insert any changes above this line.
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
index c88fb18..f20f910 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
@@ -94,7 +94,8 @@ CREATE TABLE PART_COL_STATS
PART_ID bigint NULL,
PARTITION_NAME nvarchar(767) NOT NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL
+ "CAT_NAME" nvarchar(256) NOT NULL,
+ TXN_ID bigint NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -144,7 +145,9 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME int NOT NULL,
PART_NAME nvarchar(767) NULL,
SD_ID bigint NULL,
- TBL_ID bigint NULL
+ TBL_ID bigint NULL,
+ TXN_ID bigint NULL,
+ WRITEID_LIST text NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -238,7 +241,8 @@ CREATE TABLE TAB_COL_STATS
NUM_TRUES bigint NULL,
TBL_ID bigint NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL
+ "CAT_NAME" nvarchar(256) NOT NULL,
+ TXN_ID bigint NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -369,7 +373,9 @@ CREATE TABLE TBLS
TBL_TYPE nvarchar(128) NULL,
VIEW_EXPANDED_TEXT text NULL,
VIEW_ORIGINAL_TEXT text NULL,
- IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
+ IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
+ TXN_ID bigint NULL,
+ WRITEID_LIST text NULL
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 922e8fe..22637c5 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -94,7 +94,8 @@ CREATE TABLE PART_COL_STATS
PART_ID bigint NULL,
PARTITION_NAME nvarchar(767) NOT NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL
+ "CAT_NAME" nvarchar(256) NOT NULL,
+ TXN_ID bigint NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -145,7 +146,9 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME int NOT NULL,
PART_NAME nvarchar(767) NULL,
SD_ID bigint NULL,
- TBL_ID bigint NULL
+ TBL_ID bigint NULL,
+ TXN_ID bigint NULL,
+ WRITEID_LIST text NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -242,7 +245,8 @@ CREATE TABLE TAB_COL_STATS
NUM_TRUES bigint NULL,
TBL_ID bigint NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL
+ "CAT_NAME" nvarchar(256) NOT NULL,
+ TXN_ID bigint NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -377,7 +381,9 @@ CREATE TABLE TBLS
TBL_TYPE nvarchar(128) NULL,
VIEW_EXPANDED_TEXT text NULL,
VIEW_ORIGINAL_TEXT text NULL,
- IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
+ IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
+ TXN_ID bigint NULL,
+ WRITEID_LIST text NULL
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
index 27b7026..f0cbf6c 100644
--- a/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
@@ -1,5 +1,13 @@
SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
+-- HIVE-19416
+ALTER TABLE TBLS ADD WRITEID_LIST text NULL;
+ALTER TABLE TBLS ADD TXN_ID bigint NULL;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST text NULL;
+ALTER TABLE PARTITIONS ADD TXN_ID bigint NULL;
+ALTER TABLE TAB_COL_STATS ADD TXN_ID bigint NULL;
+ALTER TABLE PART_COL_STATS ADD TXN_ID bigint NULL;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
index c54df55..6e34ab5 100644
--- a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
@@ -222,6 +222,8 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
`PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
`SD_ID` bigint(20) DEFAULT NULL,
`TBL_ID` bigint(20) DEFAULT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
+ `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`PART_ID`),
UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
KEY `PARTITIONS_N49` (`TBL_ID`),
@@ -625,6 +627,8 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
`VIEW_EXPANDED_TEXT` mediumtext,
`VIEW_ORIGINAL_TEXT` mediumtext,
`IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
+ `TXN_ID` bigint(20) DEFAULT 0,
+ `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`TBL_ID`),
UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
KEY `TBLS_N50` (`SD_ID`),
@@ -720,6 +724,7 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -750,6 +755,7 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index 6c40e6e..f8f229d 100644
--- a/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -224,6 +224,8 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
`PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
`SD_ID` bigint(20) DEFAULT NULL,
`TBL_ID` bigint(20) DEFAULT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
+ `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`PART_ID`),
UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
KEY `PARTITIONS_N49` (`TBL_ID`),
@@ -629,6 +631,8 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
`VIEW_EXPANDED_TEXT` mediumtext,
`VIEW_ORIGINAL_TEXT` mediumtext,
`IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
+ `TXN_ID` bigint(20) DEFAULT 0,
+ `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`TBL_ID`),
UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
KEY `TBLS_N50` (`SD_ID`),
@@ -726,6 +730,7 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -757,6 +762,7 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
index 9b87563..5877c93 100644
--- a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
@@ -323,4 +323,4 @@ ALTER TABLE TXN_COMPONENTS MODIFY COLUMN TC_TABLE varchar(128) NULL;
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
-ALTER TABLE `TBLS` ADD COLUMN `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
\ No newline at end of file
+ALTER TABLE `TBLS` ADD COLUMN `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
index b3789f9..4ca584c 100644
--- a/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
@@ -1,5 +1,13 @@
SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
+-- HIVE-19416
+ALTER TABLE TBLS ADD TXN_ID bigint;
+ALTER TABLE TBLS ADD WRITEID_LIST text;
+ALTER TABLE PARTITIONS ADD TXN_ID bigint;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST text;
+ALTER TABLE TAB_COL_STATS ADD TXN_ID bigint;
+ALTER TABLE PART_COL_STATS ADD TXN_ID bigint;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
index 63cc1f7..abdb984 100644
--- a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
@@ -162,7 +162,9 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME NUMBER (10) NOT NULL,
PART_NAME VARCHAR2(767) NULL,
SD_ID NUMBER NULL,
- TBL_ID NUMBER NULL
+ TBL_ID NUMBER NULL,
+ TXN_ID NUMBER NULL,
+ WRITEID_LIST CLOB NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -392,7 +394,9 @@ CREATE TABLE TBLS
TBL_TYPE VARCHAR2(128) NULL,
VIEW_EXPANDED_TEXT CLOB NULL,
VIEW_ORIGINAL_TEXT CLOB NULL,
- IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
+ IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
+ TXN_ID NUMBER NULL,
+ WRITEID_LIST CLOB NULL
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
@@ -525,7 +529,8 @@ CREATE TABLE TAB_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
+ LAST_ANALYZED NUMBER NOT NULL,
+ TXN_ID NUMBER NULL
);
CREATE TABLE VERSION (
@@ -563,7 +568,8 @@ CREATE TABLE PART_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
+ LAST_ANALYZED NUMBER NOT NULL,
+ TXN_ID NUMBER NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
@@ -1134,7 +1140,6 @@ CREATE TABLE RUNTIME_STATS (
CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
-
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index e12150a..a143fd2 100644
--- a/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -163,7 +163,9 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME NUMBER (10) NOT NULL,
PART_NAME VARCHAR2(767) NULL,
SD_ID NUMBER NULL,
- TBL_ID NUMBER NULL
+ TBL_ID NUMBER NULL,
+ TXN_ID NUMBER NULL,
+ WRITEID_LIST CLOB NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -398,7 +400,9 @@ CREATE TABLE TBLS
TBL_TYPE VARCHAR2(128) NULL,
VIEW_EXPANDED_TEXT CLOB NULL,
VIEW_ORIGINAL_TEXT CLOB NULL,
- IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
+ IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
+ TXN_ID NUMBER NULL,
+ WRITEID_LIST CLOB NULL
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
@@ -531,7 +535,8 @@ CREATE TABLE TAB_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
+ LAST_ANALYZED NUMBER NOT NULL,
+ TXN_ID NUMBER NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
@@ -571,7 +576,8 @@ CREATE TABLE PART_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
+ LAST_ANALYZED NUMBER NOT NULL,
+ TXN_ID NUMBER NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index ce3437f..5b767bc 100644
--- a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -339,4 +339,4 @@ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_WRITEID = CTC_TXNID;
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
-ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
\ No newline at end of file
+ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
index 6fa5e2d..7ac4d40 100644
--- a/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
@@ -1,5 +1,12 @@
SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
+ALTER TABLE TBLS ADD TXN_ID number NULL;
+ALTER TABLE TBLS ADD WRITEID_LIST CLOB NULL;
+ALTER TABLE PARTITIONS ADD TXN_ID number NULL;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST CLOB NULL;
+ALTER TABLE TAB_COL_STATS ADD TXN_ID number NULL;
+ALTER TABLE PART_COL_STATS ADD TXN_ID number NULL;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
index 97697f8..449f295 100644
--- a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
@@ -166,7 +166,9 @@ CREATE TABLE "PARTITIONS" (
"LAST_ACCESS_TIME" bigint NOT NULL,
"PART_NAME" character varying(767) DEFAULT NULL::character varying,
"SD_ID" bigint,
- "TBL_ID" bigint
+ "TBL_ID" bigint,
+ "TXN_ID" bigint,
+ "WRITEID_LIST" text
);
@@ -388,7 +390,9 @@ CREATE TABLE "TBLS" (
"TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
"VIEW_EXPANDED_TEXT" text,
"VIEW_ORIGINAL_TEXT" text,
- "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
+ "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
+ "TXN_ID" bigint,
+ "WRITEID_LIST" text
);
--
@@ -539,7 +543,8 @@ CREATE TABLE "TAB_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
+ "LAST_ANALYZED" bigint NOT NULL,
+ "TXN_ID" bigint
);
--
@@ -577,7 +582,8 @@ CREATE TABLE "PART_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
+ "LAST_ANALYZED" bigint NOT NULL,
+ "TXN_ID" bigint
);
--
@@ -1074,6 +1080,8 @@ ALTER TABLE ONLY "WM_MAPPING"
ALTER TABLE ONLY "WM_MAPPING"
ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+-- Transactional table stats PK constraints
+
--
-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
--
@@ -1618,6 +1626,8 @@ ALTER TABLE ONLY "MV_TABLES_USED"
ALTER TABLE ONLY "MV_TABLES_USED"
ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE;
+-- Transactional table stats FK constraints
+
--
-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
--
@@ -1822,7 +1832,6 @@ CREATE TABLE RUNTIME_STATS (
CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
-
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index b73e1d1..0ead590 100644
--- a/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -168,7 +168,9 @@ CREATE TABLE "PARTITIONS" (
"LAST_ACCESS_TIME" bigint NOT NULL,
"PART_NAME" character varying(767) DEFAULT NULL::character varying,
"SD_ID" bigint,
- "TBL_ID" bigint
+ "TBL_ID" bigint,
+ "TXN_ID" bigint,
+ "WRITEID_LIST" text
);
@@ -392,7 +394,9 @@ CREATE TABLE "TBLS" (
"TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
"VIEW_EXPANDED_TEXT" text,
"VIEW_ORIGINAL_TEXT" text,
- "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
+ "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
+ "TXN_ID" bigint,
+ "WRITEID_LIST" text
);
--
@@ -545,7 +549,8 @@ CREATE TABLE "TAB_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
+ "LAST_ANALYZED" bigint NOT NULL,
+ "TXN_ID" bigint
);
--
@@ -583,7 +588,8 @@ CREATE TABLE "PART_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
+ "LAST_ANALYZED" bigint NOT NULL,
+ "TXN_ID" bigint
);
--
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
index 40d2795..f2bae02 100644
--- a/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
@@ -1,5 +1,13 @@
SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0';
+-- HIVE-19416
+ALTER TABLE "TBLS" ADD "TXN_ID" bigint;
+ALTER TABLE "TBLS" ADD "WRITEID_LIST" text;
+ALTER TABLE "PARTITIONS" ADD "TXN_ID" bigint;
+ALTER TABLE "PARTITIONS" ADD "WRITEID_LIST" text;
+ALTER TABLE "TAB_COL_STATS" ADD "TXN_ID" bigint;
+ALTER TABLE "PART_COL_STATS" ADD "TXN_ID" bigint;
+
-- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0';
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index 6e503eb..582cf4b 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -233,6 +233,12 @@ enum SchemaVersionState {
DELETED = 8
}
+enum IsolationLevelCompliance {
+ YES = 1,
+ NO = 2,
+ UNKNOWN = 3
+}
+
struct HiveObjectRef{
1: HiveObjectType objectType,
2: string dbName,
@@ -430,7 +436,10 @@ struct Table {
15: optional bool rewriteEnabled, // rewrite enabled or not
16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation
17: optional string catName, // Name of the catalog the table is in
- 18: optional PrincipalType ownerType = PrincipalType.USER // owner type of this table (default to USER for backward compatibility)
+ 18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
+ 19: optional i64 txnId=-1,
+ 20: optional string validWriteIdList,
+ 21: optional IsolationLevelCompliance isStatsCompliant
}
struct Partition {
@@ -442,7 +451,10 @@ struct Partition {
6: StorageDescriptor sd,
7: map<string, string> parameters,
8: optional PrincipalPrivilegeSet privileges,
- 9: optional string catName
+ 9: optional string catName,
+ 10: optional i64 txnId=-1,
+ 11: optional string validWriteIdList,
+ 12: optional IsolationLevelCompliance isStatsCompliant
}
struct PartitionWithoutSD {
@@ -469,7 +481,10 @@ struct PartitionSpec {
3: string rootPath,
4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
5: optional PartitionListComposingSpec partitionList,
- 6: optional string catName
+ 6: optional string catName,
+ 7: optional i64 txnId=-1,
+ 8: optional string validWriteIdList,
+ 9: optional IsolationLevelCompliance isStatsCompliant
}
// column statistics
@@ -564,17 +579,23 @@ struct ColumnStatisticsDesc {
struct ColumnStatistics {
1: required ColumnStatisticsDesc statsDesc,
-2: required list<ColumnStatisticsObj> statsObj;
+2: required list<ColumnStatisticsObj> statsObj,
+3: optional i64 txnId=-1,
+4: optional string validWriteIdList,
+5: optional IsolationLevelCompliance isStatsCompliant
}
struct AggrStats {
1: required list<ColumnStatisticsObj> colStats,
-2: required i64 partsFound // number of partitions for which stats were found
+2: required i64 partsFound, // number of partitions for which stats were found
+3: optional IsolationLevelCompliance isStatsCompliant
}
struct SetPartitionsStatsRequest {
1: required list<ColumnStatistics> colStats,
-2: optional bool needMerge //stats need to be merged with the existing stats
+2: optional bool needMerge, //stats need to be merged with the existing stats
+3: optional i64 txnId=-1,
+4: optional string validWriteIdList
}
// schema of the table/query results etc.
@@ -703,18 +724,22 @@ struct PartitionsByExprRequest {
}
struct TableStatsResult {
- 1: required list<ColumnStatisticsObj> tableStats
+ 1: required list<ColumnStatisticsObj> tableStats,
+ 2: optional IsolationLevelCompliance isStatsCompliant
}
struct PartitionsStatsResult {
- 1: required map<string, list<ColumnStatisticsObj>> partStats
+ 1: required map<string, list<ColumnStatisticsObj>> partStats,
+ 2: optional IsolationLevelCompliance isStatsCompliant
}
struct TableStatsRequest {
1: required string dbName,
2: required string tblName,
3: required list<string> colNames
- 4: optional string catName
+ 4: optional string catName,
+ 5: optional i64 txnId=-1,
+ 6: optional string validWriteIdList
}
struct PartitionsStatsRequest {
@@ -722,12 +747,15 @@ struct PartitionsStatsRequest {
2: required string tblName,
3: required list<string> colNames,
4: required list<string> partNames,
- 5: optional string catName
+ 5: optional string catName,
+ 6: optional i64 txnId=-1,
+ 7: optional string validWriteIdList
}
// Return type for add_partitions_req
struct AddPartitionsResult {
1: optional list<Partition> partitions,
+ 2: optional IsolationLevelCompliance isStatsCompliant
}
// Request type for add_partitions_req
@@ -737,7 +765,9 @@ struct AddPartitionsRequest {
3: required list<Partition> parts,
4: required bool ifNotExists,
5: optional bool needResult=true,
- 6: optional string catName
+ 6: optional string catName,
+ 7: optional i64 txnId=-1,
+ 8: optional string validWriteIdList
}
// Return type for drop_partitions_req
@@ -1209,11 +1239,14 @@ struct GetTableRequest {
1: required string dbName,
2: required string tblName,
3: optional ClientCapabilities capabilities,
- 4: optional string catName
+ 4: optional string catName,
+ 5: optional i64 txnId=-1,
+ 6: optional string validWriteIdList
}
struct GetTableResult {
- 1: required Table table
+ 1: required Table table,
+ 2: optional IsolationLevelCompliance isStatsCompliant
}
struct GetTablesRequest {
@@ -1874,7 +1907,7 @@ service ThriftHiveMetastore extends fb303.FacebookService
// prehooks are fired together followed by all post hooks
void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts)
throws (1:InvalidOperationException o1, 2:MetaException o2)
- void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2)
+ void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context, 5:i64 txnId, 6:string writeIdList) throws (1:InvalidOperationException o1, 2:MetaException o2)
void alter_partition_with_environment_context(1:string db_name,
2:string tbl_name, 3:Partition new_part,
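
The txnId / validWriteIdList pairs added to the request structs above carry the caller's transactional snapshot, so the server can judge whether persisted stats are valid for that snapshot (reported back via isStatsCompliant). A minimal sketch of populating one of the new requests through the generated Thrift API; the db/table/column names and txnId value are illustrative:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

    static List<ColumnStatisticsObj> statsForSnapshot(ThriftHiveMetastore.Iface client)
        throws Exception {
      TableStatsRequest req = new TableStatsRequest("db1", "tbl1", Arrays.asList("col1"));
      req.setTxnId(42);                                            // field 5; defaults to -1 (no txn)
      req.setValidWriteIdList("db1.tbl1:5:9223372036854775807::"); // field 6; illustrative snapshot
      return client.get_table_statistics_req(req).getTableStats();
    }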
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 7c7429d..6985736 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -19,11 +19,7 @@
package org.apache.hadoop.hive.metastore;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.*;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -33,58 +29,6 @@ import java.util.Map;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
import org.apache.thrift.TException;
@@ -247,6 +191,12 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public Table getTable(String catName, String dbName, String tableName, long txnId, String writeIdList)
+ throws MetaException {
+ return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
+ }
+
+ @Override
public boolean addPartition(Partition part)
throws InvalidObjectException, MetaException {
return objectStore.addPartition(part);
@@ -259,6 +209,13 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public Partition getPartition(String catName, String dbName, String tableName,
+ List<String> partVals, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
+ }
+
+ @Override
public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
throws MetaException, NoSuchObjectException,
InvalidObjectException, InvalidInputException {
@@ -343,9 +300,11 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbName, String tblName,
- List<List<String>> partValsList, List<Partition> newParts)
+ List<List<String>> partValsList, List<Partition> newParts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
- objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+ objectStore.alterPartitions(
+ catName, dbName, tblName, partValsList, newParts, txnId, writeIdList);
}
@Override
@@ -647,6 +606,15 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
+ String tableName, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getTableColumnStatistics(
+ catName, dbName, tableName, colNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -739,6 +707,15 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tblName, List<String> partNames,
+ List<String> colNames, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionColumnStatistics(
+ catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
List<FieldSchema> partKeys, List<String> partVals)
throws MetaException, NoSuchObjectException {
@@ -807,6 +784,15 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public AggrStats get_aggr_stats_for(String catName, String dbName,
+ String tblName, List<String> partNames,
+ List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
return objectStore.getNextNotification(rqst);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index e4f2a17..37e9920 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -19,11 +19,7 @@
package org.apache.hadoop.hive.metastore;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.*;
import java.nio.ByteBuffer;
import java.util.Collections;
@@ -31,58 +27,6 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -243,6 +187,12 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public Table getTable(String catalogName, String dbName, String tableName,
+ long txnid, String writeIdList) throws MetaException {
+ return null;
+ }
+
+ @Override
public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
return false;
@@ -256,6 +206,13 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals,
+ long txnid, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
public boolean dropPartition(String catName, String dbName, String tableName, List<String> part_vals)
throws MetaException {
@@ -344,10 +301,10 @@ public class DummyRawStoreForJdoConnection implements RawStore {
@Override
public void alterPartitions(String catName, String db_name, String tbl_name,
- List<List<String>> part_vals_list, List<Partition> new_parts)
+ List<List<String>> part_vals_list, List<Partition> new_parts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
-
}
@Override
@@ -700,6 +657,14 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public ColumnStatistics getTableColumnStatistics(
+ String catName, String dbName, String tableName, List<String> colName,
+ long txnid, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException {
@@ -749,6 +714,14 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tblName, List<String> partNames,
+ List<String> colNames, long txnid, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return Collections.emptyList();
+ }
+
+ @Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
List<FieldSchema> partKeys, List<String> partVals)
throws MetaException, NoSuchObjectException {
@@ -812,6 +785,14 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public AggrStats get_aggr_stats_for(
+ String catName, String dbName, String tblName, List<String> partNames,
+ List<String> colNames, long txnid, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
return null;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 2d87a2f..a419ac3 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -1429,6 +1429,17 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
return fastpath ? t : deepCopy(filterHook.filterTable(t));
}
+ @Override
+ public Table getTable(String dbName, String tableName, long txnId, String validWriteIdList)
+ throws MetaException, TException, NoSuchObjectException {
+ GetTableRequest req = new GetTableRequest(dbName, tableName);
+ req.setCapabilities(version);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(validWriteIdList);
+ Table t = client.get_table_req(req).getTable();
+ return fastpath ? t : deepCopy(filterHook.filterTable(t));
+ }
+
/** {@inheritDoc} */
@Override
public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
@@ -1612,13 +1623,22 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
@Override
public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
throws InvalidOperationException, MetaException, TException {
- client.alter_partitions_with_environment_context(dbName, tblName, newParts, null);
+ client.alter_partitions_with_environment_context(dbName, tblName, newParts, null, -1, null);
}
@Override
public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
throws InvalidOperationException, MetaException, TException {
- client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
+ client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext, -1, null);
+ }
+
+ @Override
+ public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
+ throws InvalidOperationException, MetaException, TException {
+ client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext,
+ txnId, writeIdList);
}
@Override
@@ -1727,6 +1747,17 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
new TableStatsRequest(dbName, tableName, colNames)).getTableStats();
}
+ @Override
+ public List<ColumnStatisticsObj> getTableColumnStatistics(
+ String dbName, String tableName, List<String> colNames, long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
+ tsr.setTxnId(txnId);
+ tsr.setValidWriteIdList(validWriteIdList);
+
+ return client.get_table_statistics_req(tsr).getTableStats();
+ }
+
/** {@inheritDoc} */
@Override
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
@@ -1736,6 +1767,18 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats();
}
+ @Override
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String dbName, String tableName, List<String> partNames,
+ List<String> colNames, long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames);
+ psr.setTxnId(txnId);
+ psr.setValidWriteIdList(validWriteIdList);
+ return client.get_partitions_statistics_req(
+ psr).getPartStats();
+ }
+
/** {@inheritDoc} */
@Override
public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
@@ -2593,6 +2636,21 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public AggrStats getAggrColStatsFor(
+ String dbName, String tblName, List<String> colNames,
+ List<String> partName, long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ if (colNames.isEmpty() || partName.isEmpty()) {
+ LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
+ return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate
+ }
+ PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(writeIdList);
+ return client.get_aggr_stats_for(req);
+ }
+
+ @Override
public Iterable<Entry<Long, ByteBuffer>> getFileMetadata(
final List<Long> fileIds) throws TException {
return new MetastoreMapIterable<Long, ByteBuffer>() {
@@ -3000,6 +3058,12 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public Table getTable(String catName, String dbName, String tableName,
+ long txnId, String validWriteIdList) throws TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public List<Table> getTableObjectsByName(String catName, String dbName,
List<String> tableNames) throws MetaException,
InvalidOperationException, UnknownDBException, TException {
@@ -3226,7 +3290,8 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
@Override
public void alter_partitions(String catName, String dbName, String tblName,
List<Partition> newParts,
- EnvironmentContext environmentContext) throws
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList) throws
InvalidOperationException, MetaException, TException {
throw new UnsupportedOperationException();
}
@@ -3259,6 +3324,14 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public List<ColumnStatisticsObj> getTableColumnStatistics(
+ String catName, String dbName, String tableName, List<String> colNames,
+ long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String catName,
String dbName,
String tableName,
@@ -3269,6 +3342,14 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String catName, String dbName, String tableName, List<String> partNames,
+ List<String> colNames, long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
String partName, String colName) throws
NoSuchObjectException, MetaException, InvalidObjectException, TException,
@@ -3316,6 +3397,14 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
+ List<String> colNames, List<String> partNames,
+ long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public void dropConstraint(String catName, String dbName, String tableName,
String constraintName) throws MetaException, NoSuchObjectException,
TException {
@@ -3420,4 +3509,5 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
public List<RuntimeStat> getRuntimeStats(int maxWeight, int maxCreateTime) throws TException {
throw new UnsupportedOperationException();
}
+
}
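
Taken together, the new client overloads let a caller thread its snapshot through metadata reads. A minimal usage sketch (the txnId and write-ID string are illustrative, and msc is an already-connected client):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.Table;

    void readWithSnapshot(IMetaStoreClient msc) throws Exception {
      long txnId = 17;                                      // illustrative open transaction
      String writeIds = "db1.tbl1:5:9223372036854775807::"; // illustrative ValidWriteIdList
      Table t = msc.getTable("db1", "tbl1", txnId, writeIds);
      List<ColumnStatisticsObj> stats =
          msc.getTableColumnStatistics("db1", "tbl1", Arrays.asList("col1"), txnId, writeIds);
    }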
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 54bf3d7..f19b505 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -835,7 +835,8 @@ public class TestAlterPartitions extends MetaStoreClientTest {
public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception {
createTable4PartColsParts(client);
Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
- client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+ client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(),
+ -1, null);
}
@Test(expected = InvalidOperationException.class)
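As the updated test illustrates, pre-existing non-transactional callers satisfy the widened alter_partitions signature with sentinel values. A hedged sketch (client setup and the parts list are assumed):

    // -1 txnId and a null write-id list mark a non-transactional alter.
    client.alter_partitions(catName, dbName, tblName, parts, new EnvironmentContext(), -1, null);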
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
index 9867a81..cfe01fe 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
@@ -63,6 +63,10 @@ public class ValidTxnWriteIdList {
return null;
}
+ public boolean isEmpty() {
+ return tablesValidWriteIdList.isEmpty();
+ }
+
 // ValidWriteIdList entries are separated by "$", and each entry maps to one table
// Format <txnId>$<table_name>:<hwm>:<minOpenWriteId>:<open_writeids>:<abort_writeids>$<table_name>...
private void readFromString(String src) {
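To make the format comment above concrete, a small illustrative example; the string literal is hypothetical, and the parsing constructor is assumed to follow the documented layout.

    // One txn (id 7) with a single table entry: high-water mark 10, the
    // Long.MAX_VALUE sentinel for minOpenWriteId, and empty open/abort lists.
    String src = "7$default.tbl1:10:9223372036854775807::";
    ValidTxnWriteIdList txnWriteIds = new ValidTxnWriteIdList(src);
    boolean hasEntries = !txnWriteIds.isEmpty(); // new isEmpty() reports whether any table entry was parsed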
[04/13] hive git commit: HIVE-19532: 03 patch
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index a29ebb7..4a2bdc7 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -203,6 +203,17 @@ final class SchemaVersionState {
);
}
+final class IsolationLevelCompliance {
+ const YES = 1;
+ const NO = 2;
+ const UNKNOWN = 3;
+ static public $__names = array(
+ 1 => 'YES',
+ 2 => 'NO',
+ 3 => 'UNKNOWN',
+ );
+}
+
final class FunctionType {
const JAVA = 1;
static public $__names = array(
@@ -6517,6 +6528,18 @@ class Table {
* @var int
*/
public $ownerType = 1;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -6609,6 +6632,18 @@ class Table {
'var' => 'ownerType',
'type' => TType::I32,
),
+ 19 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 20 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ 21 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -6666,6 +6701,15 @@ class Table {
if (isset($vals['ownerType'])) {
$this->ownerType = $vals['ownerType'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -6841,6 +6885,27 @@ class Table {
$xfer += $input->skip($ftype);
}
break;
+ case 19:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 20:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 21:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -6978,6 +7043,21 @@ class Table {
$xfer += $output->writeI32($this->ownerType);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 19);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 20);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 21);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -7024,6 +7104,18 @@ class Partition {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -7078,6 +7170,18 @@ class Partition {
'var' => 'catName',
'type' => TType::STRING,
),
+ 10 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 11 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ 12 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -7108,6 +7212,15 @@ class Partition {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -7218,6 +7331,27 @@ class Partition {
$xfer += $input->skip($ftype);
}
break;
+ case 10:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 11:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 12:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -7307,6 +7441,21 @@ class Partition {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 10);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 11);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 12);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -7830,6 +7979,18 @@ class PartitionSpec {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -7860,6 +8021,18 @@ class PartitionSpec {
'var' => 'catName',
'type' => TType::STRING,
),
+ 7 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 8 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ 9 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -7881,6 +8054,15 @@ class PartitionSpec {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -7947,6 +8129,27 @@ class PartitionSpec {
$xfer += $input->skip($ftype);
}
break;
+ case 7:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 8:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 9:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -7996,6 +8199,21 @@ class PartitionSpec {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 7);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 9);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -9894,6 +10112,18 @@ class ColumnStatistics {
* @var \metastore\ColumnStatisticsObj[]
*/
public $statsObj = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -9912,6 +10142,18 @@ class ColumnStatistics {
'class' => '\metastore\ColumnStatisticsObj',
),
),
+ 3 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 4 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ 5 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -9921,6 +10163,15 @@ class ColumnStatistics {
if (isset($vals['statsObj'])) {
$this->statsObj = $vals['statsObj'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -9969,6 +10220,27 @@ class ColumnStatistics {
$xfer += $input->skip($ftype);
}
break;
+ case 3:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 4:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 5:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -10007,6 +10279,21 @@ class ColumnStatistics {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 3);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 5);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -10025,6 +10312,10 @@ class AggrStats {
* @var int
*/
public $partsFound = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -10042,6 +10333,10 @@ class AggrStats {
'var' => 'partsFound',
'type' => TType::I64,
),
+ 3 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -10051,6 +10346,9 @@ class AggrStats {
if (isset($vals['partsFound'])) {
$this->partsFound = $vals['partsFound'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -10098,6 +10396,13 @@ class AggrStats {
$xfer += $input->skip($ftype);
}
break;
+ case 3:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -10133,6 +10438,11 @@ class AggrStats {
$xfer += $output->writeI64($this->partsFound);
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 3);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -10151,6 +10461,14 @@ class SetPartitionsStatsRequest {
* @var bool
*/
public $needMerge = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -10168,6 +10486,14 @@ class SetPartitionsStatsRequest {
'var' => 'needMerge',
'type' => TType::BOOL,
),
+ 3 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 4 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -10177,6 +10503,12 @@ class SetPartitionsStatsRequest {
if (isset($vals['needMerge'])) {
$this->needMerge = $vals['needMerge'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -10224,6 +10556,20 @@ class SetPartitionsStatsRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 3:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 4:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -10259,6 +10605,16 @@ class SetPartitionsStatsRequest {
$xfer += $output->writeBool($this->needMerge);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 3);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13010,6 +13366,10 @@ class TableStatsResult {
* @var \metastore\ColumnStatisticsObj[]
*/
public $tableStats = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13023,12 +13383,19 @@ class TableStatsResult {
'class' => '\metastore\ColumnStatisticsObj',
),
),
+ 2 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
if (isset($vals['tableStats'])) {
$this->tableStats = $vals['tableStats'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -13069,6 +13436,13 @@ class TableStatsResult {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13099,6 +13473,11 @@ class TableStatsResult {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13113,6 +13492,10 @@ class PartitionsStatsResult {
* @var array
*/
public $partStats = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13134,12 +13517,19 @@ class PartitionsStatsResult {
),
),
),
+ 2 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
if (isset($vals['partStats'])) {
$this->partStats = $vals['partStats'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -13193,6 +13583,13 @@ class PartitionsStatsResult {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13233,6 +13630,11 @@ class PartitionsStatsResult {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13259,6 +13661,14 @@ class TableStatsRequest {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13283,6 +13693,14 @@ class TableStatsRequest {
'var' => 'catName',
'type' => TType::STRING,
),
+ 5 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 6 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -13298,6 +13716,12 @@ class TableStatsRequest {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -13358,6 +13782,20 @@ class TableStatsRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 5:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 6:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13403,6 +13841,16 @@ class TableStatsRequest {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 5);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13433,6 +13881,14 @@ class PartitionsStatsRequest {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13465,6 +13921,14 @@ class PartitionsStatsRequest {
'var' => 'catName',
'type' => TType::STRING,
),
+ 6 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 7 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -13483,6 +13947,12 @@ class PartitionsStatsRequest {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -13560,6 +14030,20 @@ class PartitionsStatsRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 6:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 7:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13622,6 +14106,16 @@ class PartitionsStatsRequest {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13636,6 +14130,10 @@ class AddPartitionsResult {
* @var \metastore\Partition[]
*/
public $partitions = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13649,12 +14147,19 @@ class AddPartitionsResult {
'class' => '\metastore\Partition',
),
),
+ 2 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
if (isset($vals['partitions'])) {
$this->partitions = $vals['partitions'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -13695,6 +14200,13 @@ class AddPartitionsResult {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13725,6 +14237,11 @@ class AddPartitionsResult {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13759,6 +14276,14 @@ class AddPartitionsRequest {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13792,6 +14317,14 @@ class AddPartitionsRequest {
'var' => 'catName',
'type' => TType::STRING,
),
+ 7 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 8 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -13813,6 +14346,12 @@ class AddPartitionsRequest {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -13888,6 +14427,20 @@ class AddPartitionsRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 7:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 8:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13943,6 +14496,16 @@ class AddPartitionsRequest {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 7);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -23596,6 +24159,14 @@ class GetTableRequest {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -23617,6 +24188,14 @@ class GetTableRequest {
'var' => 'catName',
'type' => TType::STRING,
),
+ 5 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 6 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -23632,6 +24211,12 @@ class GetTableRequest {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -23683,6 +24268,20 @@ class GetTableRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 5:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 6:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -23719,6 +24318,16 @@ class GetTableRequest {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 5);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -23733,6 +24342,10 @@ class GetTableResult {
* @var \metastore\Table
*/
public $table = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -23742,12 +24355,19 @@ class GetTableResult {
'type' => TType::STRUCT,
'class' => '\metastore\Table',
),
+ 2 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
if (isset($vals['table'])) {
$this->table = $vals['table'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -23778,6 +24398,13 @@ class GetTableResult {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -23799,6 +24426,11 @@ class GetTableResult {
$xfer += $this->table->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
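The PHP above is the generated mirror of a single Thrift IDL change; the same optional fields appear in every binding. A hedged Java-side sketch of populating them (setter names assumed from the generated Java code):

    Table t = new Table();
    t.setTxnId(7L);                                                   // field 19, defaults to -1 when unset
    t.setValidWriteIdList("7$default.tbl1:10:9223372036854775807::"); // field 20
    t.setIsStatsCompliant(IsolationLevelCompliance.YES);              // field 21
    // All three are optional on the wire, so pre-patch readers skip the
    // unknown field ids instead of failing to deserialize.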
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 5402372..7a438db 100755
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -107,7 +107,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print(' get_partitions_by_names(string db_name, string tbl_name, names)')
print(' void alter_partition(string db_name, string tbl_name, Partition new_part)')
print(' void alter_partitions(string db_name, string tbl_name, new_parts)')
- print(' void alter_partitions_with_environment_context(string db_name, string tbl_name, new_parts, EnvironmentContext environment_context)')
+ print(' void alter_partitions_with_environment_context(string db_name, string tbl_name, new_parts, EnvironmentContext environment_context, i64 txnId, string writeIdList)')
print(' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)')
print(' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)')
print(' bool partition_name_has_valid_characters( part_vals, bool throw_exception)')
@@ -799,10 +799,10 @@ elif cmd == 'alter_partitions':
pp.pprint(client.alter_partitions(args[0],args[1],eval(args[2]),))
elif cmd == 'alter_partitions_with_environment_context':
- if len(args) != 4:
- print('alter_partitions_with_environment_context requires 4 args')
+ if len(args) != 6:
+ print('alter_partitions_with_environment_context requires 6 args')
sys.exit(1)
- pp.pprint(client.alter_partitions_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),))
+ pp.pprint(client.alter_partitions_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),eval(args[4]),args[5],))
elif cmd == 'alter_partition_with_environment_context':
if len(args) != 4:
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 5a3f2c1..665d401 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -717,13 +717,15 @@ class Iface(fb303.FacebookService.Iface):
"""
pass
- def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+ def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context, txnId, writeIdList):
"""
Parameters:
- db_name
- tbl_name
- new_parts
- environment_context
+ - txnId
+ - writeIdList
"""
pass
@@ -4734,24 +4736,28 @@ class Client(fb303.FacebookService.Client, Iface):
raise result.o2
return
- def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+ def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context, txnId, writeIdList):
"""
Parameters:
- db_name
- tbl_name
- new_parts
- environment_context
+ - txnId
+ - writeIdList
"""
- self.send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
+ self.send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context, txnId, writeIdList)
self.recv_alter_partitions_with_environment_context()
- def send_alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+ def send_alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context, txnId, writeIdList):
self._oprot.writeMessageBegin('alter_partitions_with_environment_context', TMessageType.CALL, self._seqid)
args = alter_partitions_with_environment_context_args()
args.db_name = db_name
args.tbl_name = tbl_name
args.new_parts = new_parts
args.environment_context = environment_context
+ args.txnId = txnId
+ args.writeIdList = writeIdList
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
@@ -11366,7 +11372,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
iprot.readMessageEnd()
result = alter_partitions_with_environment_context_result()
try:
- self._handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context)
+ self._handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context, args.txnId, args.writeIdList)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
@@ -29394,6 +29400,8 @@ class alter_partitions_with_environment_context_args:
- tbl_name
- new_parts
- environment_context
+ - txnId
+ - writeIdList
"""
thrift_spec = (
@@ -29402,13 +29410,17 @@ class alter_partitions_with_environment_context_args:
(2, TType.STRING, 'tbl_name', None, None, ), # 2
(3, TType.LIST, 'new_parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3
(4, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4
+ (5, TType.I64, 'txnId', None, None, ), # 5
+ (6, TType.STRING, 'writeIdList', None, None, ), # 6
)
- def __init__(self, db_name=None, tbl_name=None, new_parts=None, environment_context=None,):
+ def __init__(self, db_name=None, tbl_name=None, new_parts=None, environment_context=None, txnId=None, writeIdList=None,):
self.db_name = db_name
self.tbl_name = tbl_name
self.new_parts = new_parts
self.environment_context = environment_context
+ self.txnId = txnId
+ self.writeIdList = writeIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -29446,6 +29458,16 @@ class alter_partitions_with_environment_context_args:
self.environment_context.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.STRING:
+ self.writeIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -29475,6 +29497,14 @@ class alter_partitions_with_environment_context_args:
oprot.writeFieldBegin('environment_context', TType.STRUCT, 4)
self.environment_context.write(oprot)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 5)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.writeIdList is not None:
+ oprot.writeFieldBegin('writeIdList', TType.STRING, 6)
+ oprot.writeString(self.writeIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -29488,6 +29518,8 @@ class alter_partitions_with_environment_context_args:
value = (value * 31) ^ hash(self.tbl_name)
value = (value * 31) ^ hash(self.new_parts)
value = (value * 31) ^ hash(self.environment_context)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.writeIdList)
return value
def __repr__(self):
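The widened alter_partitions_with_environment_context signature lands identically in each binding; because txnId and writeIdList travel as optional fields 5 and 6 of the args struct, a server built before this change skips them rather than rejecting the call. A sketch of the extended call from the Java client (transport setup and arguments assumed):

    client.alter_partitions_with_environment_context(dbName, tblName, parts,
        new EnvironmentContext(), txnId, writeIdList);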
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 11affe3..899b744 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -305,6 +305,23 @@ class SchemaVersionState:
"DELETED": 8,
}
+class IsolationLevelCompliance:
+ YES = 1
+ NO = 2
+ UNKNOWN = 3
+
+ _VALUES_TO_NAMES = {
+ 1: "YES",
+ 2: "NO",
+ 3: "UNKNOWN",
+ }
+
+ _NAMES_TO_VALUES = {
+ "YES": 1,
+ "NO": 2,
+ "UNKNOWN": 3,
+ }
+
class FunctionType:
JAVA = 1
@@ -4550,6 +4567,9 @@ class Table:
- creationMetadata
- catName
- ownerType
+ - txnId
+ - validWriteIdList
+ - isStatsCompliant
"""
thrift_spec = (
@@ -4572,9 +4592,12 @@ class Table:
(16, TType.STRUCT, 'creationMetadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 16
(17, TType.STRING, 'catName', None, None, ), # 17
(18, TType.I32, 'ownerType', None, 1, ), # 18
+ (19, TType.I64, 'txnId', None, -1, ), # 19
+ (20, TType.STRING, 'validWriteIdList', None, None, ), # 20
+ (21, TType.I32, 'isStatsCompliant', None, None, ), # 21
)
- def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4],):
+ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4], txnId=thrift_spec[19][4], validWriteIdList=None, isStatsCompliant=None,):
self.tableName = tableName
self.dbName = dbName
self.owner = owner
@@ -4593,6 +4616,9 @@ class Table:
self.creationMetadata = creationMetadata
self.catName = catName
self.ownerType = ownerType
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -4708,6 +4734,21 @@ class Table:
self.ownerType = iprot.readI32()
else:
iprot.skip(ftype)
+ elif fid == 19:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 20:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 21:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -4797,6 +4838,18 @@ class Table:
oprot.writeFieldBegin('ownerType', TType.I32, 18)
oprot.writeI32(self.ownerType)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 19)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 20)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 21)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -4824,6 +4877,9 @@ class Table:
value = (value * 31) ^ hash(self.creationMetadata)
value = (value * 31) ^ hash(self.catName)
value = (value * 31) ^ hash(self.ownerType)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -4849,6 +4905,9 @@ class Partition:
- parameters
- privileges
- catName
+ - txnId
+ - validWriteIdList
+ - isStatsCompliant
"""
thrift_spec = (
@@ -4862,9 +4921,12 @@ class Partition:
(7, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 7
(8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 8
(9, TType.STRING, 'catName', None, None, ), # 9
+ (10, TType.I64, 'txnId', None, -1, ), # 10
+ (11, TType.STRING, 'validWriteIdList', None, None, ), # 11
+ (12, TType.I32, 'isStatsCompliant', None, None, ), # 12
)
- def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None,):
+ def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None, txnId=thrift_spec[10][4], validWriteIdList=None, isStatsCompliant=None,):
self.values = values
self.dbName = dbName
self.tableName = tableName
@@ -4874,6 +4936,9 @@ class Partition:
self.parameters = parameters
self.privileges = privileges
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -4942,6 +5007,21 @@ class Partition:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 10:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 11:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 12:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -4995,6 +5075,18 @@ class Partition:
oprot.writeFieldBegin('catName', TType.STRING, 9)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 10)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 11)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 12)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -5013,6 +5105,9 @@ class Partition:
value = (value * 31) ^ hash(self.parameters)
value = (value * 31) ^ hash(self.privileges)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -5346,6 +5441,9 @@ class PartitionSpec:
- sharedSDPartitionSpec
- partitionList
- catName
+ - txnId
+ - validWriteIdList
+ - isStatsCompliant
"""
thrift_spec = (
@@ -5356,15 +5454,21 @@ class PartitionSpec:
(4, TType.STRUCT, 'sharedSDPartitionSpec', (PartitionSpecWithSharedSD, PartitionSpecWithSharedSD.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5
(6, TType.STRING, 'catName', None, None, ), # 6
+ (7, TType.I64, 'txnId', None, -1, ), # 7
+ (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
+ (9, TType.I32, 'isStatsCompliant', None, None, ), # 9
)
- def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None,):
+ def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None, txnId=thrift_spec[7][4], validWriteIdList=None, isStatsCompliant=None,):
self.dbName = dbName
self.tableName = tableName
self.rootPath = rootPath
self.sharedSDPartitionSpec = sharedSDPartitionSpec
self.partitionList = partitionList
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -5407,6 +5511,21 @@ class PartitionSpec:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 7:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 8:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 9:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -5441,6 +5560,18 @@ class PartitionSpec:
oprot.writeFieldBegin('catName', TType.STRING, 6)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 7)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 9)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -5456,6 +5587,9 @@ class PartitionSpec:
value = (value * 31) ^ hash(self.sharedSDPartitionSpec)
value = (value * 31) ^ hash(self.partitionList)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -6841,17 +6975,26 @@ class ColumnStatistics:
Attributes:
- statsDesc
- statsObj
+ - txnId
+ - validWriteIdList
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'statsDesc', (ColumnStatisticsDesc, ColumnStatisticsDesc.thrift_spec), None, ), # 1
(2, TType.LIST, 'statsObj', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 2
+ (3, TType.I64, 'txnId', None, -1, ), # 3
+ (4, TType.STRING, 'validWriteIdList', None, None, ), # 4
+ (5, TType.I32, 'isStatsCompliant', None, None, ), # 5
)
- def __init__(self, statsDesc=None, statsObj=None,):
+ def __init__(self, statsDesc=None, statsObj=None, txnId=thrift_spec[3][4], validWriteIdList=None, isStatsCompliant=None,):
self.statsDesc = statsDesc
self.statsObj = statsObj
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6879,6 +7022,21 @@ class ColumnStatistics:
iprot.readListEnd()
else:
iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6900,6 +7058,18 @@ class ColumnStatistics:
iter243.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 3)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 5)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -6915,6 +7085,9 @@ class ColumnStatistics:
value = 17
value = (value * 31) ^ hash(self.statsDesc)
value = (value * 31) ^ hash(self.statsObj)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -6933,17 +7106,20 @@ class AggrStats:
Attributes:
- colStats
- partsFound
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1
(2, TType.I64, 'partsFound', None, None, ), # 2
+ (3, TType.I32, 'isStatsCompliant', None, None, ), # 3
)
- def __init__(self, colStats=None, partsFound=None,):
+ def __init__(self, colStats=None, partsFound=None, isStatsCompliant=None,):
self.colStats = colStats
self.partsFound = partsFound
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6970,6 +7146,11 @@ class AggrStats:
self.partsFound = iprot.readI64()
else:
iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6991,6 +7172,10 @@ class AggrStats:
oprot.writeFieldBegin('partsFound', TType.I64, 2)
oprot.writeI64(self.partsFound)
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 3)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -7006,6 +7191,7 @@ class AggrStats:
value = 17
value = (value * 31) ^ hash(self.colStats)
value = (value * 31) ^ hash(self.partsFound)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -7024,17 +7210,23 @@ class SetPartitionsStatsRequest:
Attributes:
- colStats
- needMerge
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1
(2, TType.BOOL, 'needMerge', None, None, ), # 2
+ (3, TType.I64, 'txnId', None, -1, ), # 3
+ (4, TType.STRING, 'validWriteIdList', None, None, ), # 4
)
- def __init__(self, colStats=None, needMerge=None,):
+ def __init__(self, colStats=None, needMerge=None, txnId=thrift_spec[3][4], validWriteIdList=None,):
self.colStats = colStats
self.needMerge = needMerge
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -7061,6 +7253,16 @@ class SetPartitionsStatsRequest:
self.needMerge = iprot.readBool()
else:
iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -7082,6 +7284,14 @@ class SetPartitionsStatsRequest:
oprot.writeFieldBegin('needMerge', TType.BOOL, 2)
oprot.writeBool(self.needMerge)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 3)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -7095,6 +7305,8 @@ class SetPartitionsStatsRequest:
value = 17
value = (value * 31) ^ hash(self.colStats)
value = (value * 31) ^ hash(self.needMerge)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -9133,15 +9345,18 @@ class TableStatsResult:
"""
Attributes:
- tableStats
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'tableStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1
+ (2, TType.I32, 'isStatsCompliant', None, None, ), # 2
)
- def __init__(self, tableStats=None,):
+ def __init__(self, tableStats=None, isStatsCompliant=None,):
self.tableStats = tableStats
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9163,6 +9378,11 @@ class TableStatsResult:
iprot.readListEnd()
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9180,6 +9400,10 @@ class TableStatsResult:
iter380.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9192,6 +9416,7 @@ class TableStatsResult:
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.tableStats)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -9209,15 +9434,18 @@ class PartitionsStatsResult:
"""
Attributes:
- partStats
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'partStats', (TType.STRING,None,TType.LIST,(TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec))), None, ), # 1
+ (2, TType.I32, 'isStatsCompliant', None, None, ), # 2
)
- def __init__(self, partStats=None,):
+ def __init__(self, partStats=None, isStatsCompliant=None,):
self.partStats = partStats
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9245,6 +9473,11 @@ class PartitionsStatsResult:
iprot.readMapEnd()
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9266,6 +9499,10 @@ class PartitionsStatsResult:
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9278,6 +9515,7 @@ class PartitionsStatsResult:
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.partStats)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -9298,6 +9536,8 @@ class TableStatsRequest:
- tblName
- colNames
- catName
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
@@ -9306,13 +9546,17 @@ class TableStatsRequest:
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3
(4, TType.STRING, 'catName', None, None, ), # 4
+ (5, TType.I64, 'txnId', None, -1, ), # 5
+ (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
)
- def __init__(self, dbName=None, tblName=None, colNames=None, catName=None,):
+ def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,):
self.dbName = dbName
self.tblName = tblName
self.colNames = colNames
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9348,6 +9592,16 @@ class TableStatsRequest:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9377,6 +9631,14 @@ class TableStatsRequest:
oprot.writeFieldBegin('catName', TType.STRING, 4)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 5)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9396,6 +9658,8 @@ class TableStatsRequest:
value = (value * 31) ^ hash(self.tblName)
value = (value * 31) ^ hash(self.colNames)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -9417,6 +9681,8 @@ class PartitionsStatsRequest:
- colNames
- partNames
- catName
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
@@ -9426,14 +9692,18 @@ class PartitionsStatsRequest:
(3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3
(4, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 4
(5, TType.STRING, 'catName', None, None, ), # 5
+ (6, TType.I64, 'txnId', None, -1, ), # 6
+ (7, TType.STRING, 'validWriteIdList', None, None, ), # 7
)
- def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None,):
+ def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, txnId=thrift_spec[6][4], validWriteIdList=None,):
self.dbName = dbName
self.tblName = tblName
self.colNames = colNames
self.partNames = partNames
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9479,6 +9749,16 @@ class PartitionsStatsRequest:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 7:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9515,6 +9795,14 @@ class PartitionsStatsRequest:
oprot.writeFieldBegin('catName', TType.STRING, 5)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 6)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9537,6 +9825,8 @@ class PartitionsStatsRequest:
value = (value * 31) ^ hash(self.colNames)
value = (value * 31) ^ hash(self.partNames)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -9554,15 +9844,18 @@ class AddPartitionsResult:
"""
Attributes:
- partitions
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1
+ (2, TType.I32, 'isStatsCompliant', None, None, ), # 2
)
- def __init__(self, partitions=None,):
+ def __init__(self, partitions=None, isStatsCompliant=None,):
self.partitions = partitions
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9584,6 +9877,11 @@ class AddPartitionsResult:
iprot.readListEnd()
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9601,6 +9899,10 @@ class AddPartitionsResult:
iter424.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9611,6 +9913,7 @@ class AddPartitionsResult:
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.partitions)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -9633,6 +9936,8 @@ class AddPartitionsRequest:
- ifNotExists
- needResult
- catName
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
@@ -9643,15 +9948,19 @@ class AddPartitionsRequest:
(4, TType.BOOL, 'ifNotExists', None, None, ), # 4
(5, TType.BOOL, 'needResult', None, True, ), # 5
(6, TType.STRING, 'catName', None, None, ), # 6
+ (7, TType.I64, 'txnId', None, -1, ), # 7
+ (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
)
- def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None,):
+ def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None, txnId=thrift_spec[7][4], validWriteIdList=None,):
self.dbName = dbName
self.tblName = tblName
self.parts = parts
self.ifNotExists = ifNotExists
self.needResult = needResult
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9698,6 +10007,16 @@ class AddPartitionsRequest:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 7:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 8:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9735,6 +10054,14 @@ class AddPartitionsRequest:
oprot.writeFieldBegin('catName', TType.STRING, 6)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 7)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9758,6 +10085,8 @@ class AddPartitionsRequest:
value = (value * 31) ^ hash(self.ifNotExists)
value = (value * 31) ^ hash(self.needResult)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -16609,6 +16938,8 @@ class GetTableRequest:
- tblName
- capabilities
- catName
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
@@ -16617,13 +16948,17 @@ class GetTableRequest:
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3
(4, TType.STRING, 'catName', None, None, ), # 4
+ (5, TType.I64, 'txnId', None, -1, ), # 5
+ (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
)
- def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None,):
+ def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,):
self.dbName = dbName
self.tblName = tblName
self.capabilities = capabilities
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -16655,6 +16990,16 @@ class GetTableRequest:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -16681,6 +17026,14 @@ class GetTableRequest:
oprot.writeFieldBegin('catName', TType.STRING, 4)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 5)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -16698,6 +17051,8 @@ class GetTableRequest:
value = (value * 31) ^ hash(self.tblName)
value = (value * 31) ^ hash(self.capabilities)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -16715,15 +17070,18 @@ class GetTableResult:
"""
Attributes:
- table
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'table', (Table, Table.thrift_spec), None, ), # 1
+ (2, TType.I32, 'isStatsCompliant', None, None, ), # 2
)
- def __init__(self, table=None,):
+ def __init__(self, table=None, isStatsCompliant=None,):
self.table = table
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -16740,6 +17098,11 @@ class GetTableResult:
self.table.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -16754,6 +17117,10 @@ class GetTableResult:
oprot.writeFieldBegin('table', TType.STRUCT, 1)
self.table.write(oprot)
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -16766,6 +17133,7 @@ class GetTableResult:
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
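
The diffs above add request-side fields — txnId (I64, defaulting to -1) and validWriteIdList (STRING, defaulting to None), at field ids 5/6 for TableStatsRequest and GetTableRequest, 6/7 for PartitionsStatsRequest, and 7/8 for AddPartitionsRequest — plus a response-side isStatsCompliant flag on the result structs. A minimal sketch (not part of the patch) of how a Python client might carry the new fields, assuming the regenerated gen-py package (hive_metastore.ttypes) from this commit is importable:

# Sketch only: assumes the hive_metastore.ttypes module generated by
# this commit is on sys.path.
from hive_metastore.ttypes import TableStatsRequest

# A reader inside an open transaction passes its txn id plus the opaque
# ValidWriteIdList string so the metastore can judge stats compliance.
# Both values below are hypothetical.
req = TableStatsRequest(
    dbName='default',
    tblName='stats_part',
    colNames=['key', 'value'],
    txnId=7,
    validWriteIdList='default.stats_part:8:9223372036854775807::',
)
assert req.txnId == 7

# Omitting the new fields keeps the old wire behaviour: txnId falls back to
# the thrift_spec default of -1 (still serialized, since -1 is not None),
# while validWriteIdList stays None, so write() skips its field id entirely.
legacy = TableStatsRequest(dbName='default', tblName='stats_part', colNames=['key'])
assert legacy.txnId == -1 and legacy.validWridIdList is None if False else True
assert legacy.txnId == -1 and legacy.validWriteIdList is None
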
[12/13] hive git commit: HIVE-19532: 03 patch
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/test/results/clientpositive/stats_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_part.q.out b/ql/src/test/results/clientpositive/stats_part.q.out
new file mode 100644
index 0000000..5e3c271
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_part.q.out
@@ -0,0 +1,650 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int, key int, value int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int, key int, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20,201), (101,40,401), (102,50,501)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20,201), (101,40,401), (102,50,501)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into mysource values (100,21,211), (101,41,411), (102,51,511)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,21,211), (101,41,411), (102,51,511)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: drop table if exists stats_partitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_partitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_part
+POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_part
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Filter Operator
+ predicate: (p > 100) (type: boolean)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=100
+POSTHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=100
+POSTHOOK: Lineage: stats_part PARTITION(p=100).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=100).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=101
+POSTHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=101).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=102
+POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=102).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 3
+ numPartitions 3
+ numRows 6
+ rawDataSize 546
+ totalSize 930
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=102
+POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=102).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 4
+ numPartitions 3
+ numRows 8
+ rawDataSize 728
+ totalSize 1241
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: show partitions stats_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: show partitions stats_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@stats_part
+p=100
+p=101
+p=102
+PREHOOK: query: explain select count(*) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+51
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 4
+ numPartitions 3
+ numRows 8
+ rawDataSize 728
+ totalSize 1241
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+51
+PREHOOK: query: select count(value) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(value) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: select count(value) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(value) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 4
+ numPartitions 3
+ numRows 8
+ rawDataSize 728
+ totalSize 1241
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select count(*) from stats_part where p = 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p = 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p = 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p = 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+2
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+51
+PREHOOK: query: describe extended stats_part partition (p=101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: describe extended stats_part partition (p=101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+key int
+value string
+p int
+
+# Partition Information
+# col_name data_type comment
+p int
+
+#### A masked pattern was here ####
+PREHOOK: query: describe extended stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: describe extended stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+key int
+value string
+p int
+
+# Partition Information
+# col_name data_type comment
+p int
+
+#### A masked pattern was here ####
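
The golden file above shows the planner putting these stats to work on a non-transactional table: the first filtered aggregate (count(key) ... where p > 100, before the inserts) needs a full Stage-1 Map Reduce scan, while after the inserts refresh the basic stats (numRows, numFiles, totalSize) the same aggregates collapse to a lone Stage-0 Fetch Operator answered from metadata. A hedged sketch of one way to spot that from EXPLAIN output; the third-party pyhive package and the localhost:10000 HiveServer2 endpoint are illustrative assumptions, not part of this patch:

# Sketch only: pyhive and the endpoint below are assumptions for illustration.
from pyhive import hive

conn = hive.connect(host='localhost', port=10000, username='hive')
cur = conn.cursor()
cur.execute('explain select count(key) from stats_part where p > 100')
plan = '\n'.join(row[0] for row in cur.fetchall())

# A metadata-only answer is a single Stage-0 Fetch Operator, exactly as in
# the stats_part.q.out plans above; a Stage-1 Map Reduce stage means Hive
# had to scan the partitions instead of trusting the stored stats.
answered_from_stats = 'Fetch Operator' in plan and 'Stage-1' not in plan
print('answered from stats:', answered_from_stats)
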
[11/13] hive git commit: HIVE-19532: 03 patch
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/test/results/clientpositive/stats_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_part2.q.out b/ql/src/test/results/clientpositive/stats_part2.q.out
new file mode 100644
index 0000000..a296c15
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_part2.q.out
@@ -0,0 +1,1598 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int, key int, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int, key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: drop table if exists stats_partitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_partitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_part
+POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_part
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (p > 100) (type: boolean)
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Filter Operator
+ predicate: (p > 100) (type: boolean)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: max(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: int)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=100
+POSTHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=100
+POSTHOOK: Lineage: stats_part PARTITION(p=100).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=100).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=101
+POSTHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=101).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=102
+POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=102).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 3
+ numPartitions 3
+ numRows 6
+ rawDataSize 0
+ totalSize 2337
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: max(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: int)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=102
+POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=102).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 4
+ numPartitions 3
+ numRows 8
+ rawDataSize 0
+ totalSize 3126
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: show partitions stats_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: show partitions stats_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@stats_part
+p=100
+p=101
+p=102
+PREHOOK: query: explain select count(*) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 8 Data size: 31324 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 8 Data size: 31324 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: max(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: int)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+51
+PREHOOK: query: desc formatted stats_part partition(p = 100)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 100)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [100]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 758
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [101]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 789
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 102)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 102)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [102]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 2
+ numRows 4
+ rawDataSize 0
+ totalSize 1579
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: update stats_part set key = key + 100 where key in(-50,40) and p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+PREHOOK: Output: default@stats_part@p=101
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: update stats_part set key = key + 100 where key in(-50,40) and p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Output: default@stats_part@p=102
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: max(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: int)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+140
+PREHOOK: query: desc formatted stats_part partition(p = 100)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 100)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [100]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 758
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [101]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 3
+ numRows 2
+ rawDataSize 0
+ totalSize 2238
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 102)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 102)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [102]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 2
+ numRows 4
+ rawDataSize 0
+ totalSize 1579
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select count(value) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(value) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+8
+PREHOOK: query: update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+PREHOOK: Output: default@stats_part@p=101
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Output: default@stats_part@p=102
+PREHOOK: query: desc formatted stats_part partition(p = 100)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 100)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [100]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 758
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [101]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 3
+ numRows 2
+ rawDataSize 0
+ totalSize 2238
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 102)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 102)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [102]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 2
+ numRows 4
+ rawDataSize 0
+ totalSize 1579
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select count(value) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(value) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+8
+PREHOOK: query: delete from stats_part where key in (20, 41)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+PREHOOK: Output: default@stats_part@p=100
+PREHOOK: Output: default@stats_part@p=101
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: delete from stats_part where key in (20, 41)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+POSTHOOK: Output: default@stats_part@p=100
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Output: default@stats_part@p=102
+PREHOOK: query: desc formatted stats_part partition(p = 100)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 100)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [100]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 1
+ rawDataSize 0
+ totalSize 1366
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [101]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 4
+ numRows 1
+ rawDataSize 0
+ totalSize 2837
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 102)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 102)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [102]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 2
+ numRows 4
+ rawDataSize 0
+ totalSize 1579
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select count(*) from stats_part where p = 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p = 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 1 Data size: 13668 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 1 Data size: 13668 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p = 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p = 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+#### A masked pattern was here ####
+1
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 5 Data size: 44200 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 5 Data size: 44200 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+5
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 5 Data size: 44200 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 5 Data size: 44200 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+5
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: max(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: int)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+140
+PREHOOK: query: describe extended stats_part partition (p=101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: describe extended stats_part partition (p=101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+key int
+value string
+p int
+
+# Partition Information
+# col_name data_type comment
+p int
+
+#### A masked pattern was here ####
+PREHOOK: query: describe extended stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: describe extended stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+key int
+value string
+p int
+
+# Partition Information
+# col_name data_type comment
+p int
+
+#### A masked pattern was here ####
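The run above illustrates the behavior under test: an ACID update or delete drops the COLUMN_STATS entry of COLUMN_STATS_ACCURATE for the partitions it touches while BASIC_STATS survives, which is why the later explain plans fall back to "Column stats: PARTIAL" for those partitions. Below is a minimal sketch (not part of the patch) of reading those partition parameters through the metastore client; it assumes a reachable metastore configured via hive-site.xml and the stats_part table from this test.

    // Minimal illustrative sketch: inspect the per-partition stats flags
    // that the q.out above asserts. Names/URI come from the test, not the patch.
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class ShowPartitionStats {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        for (String partName : new String[] {"p=100", "p=101", "p=102"}) {
          Partition part = client.getPartition("default", "stats_part", partName);
          // After the update/delete above, touched partitions keep BASIC_STATS
          // but lose the COLUMN_STATS entry in this parameter.
          System.out.println(partName + " -> "
              + part.getParameters().get("COLUMN_STATS_ACCURATE"));
        }
        client.close();
      }
    }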
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/test/results/clientpositive/stats_sizebug.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_sizebug.q.out b/ql/src/test/results/clientpositive/stats_sizebug.q.out
new file mode 100644
index 0000000..cf1c0a1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_sizebug.q.out
@@ -0,0 +1,210 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int,key int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int,key int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: drop table if exists stats_nonpartitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_nonpartitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_nonpartitioned
+PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: mysource
+ Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (p = 100) (type: boolean)
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 100 (type: int), key (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.stats_nonpartitioned
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int)
+ outputColumnNames: key, value
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.stats_nonpartitioned
+
+ Stage: Stage-2
+ Stats Work
+ Basic Stats Work:
+ Column Stats Desc:
+ Columns: key, value
+ Column Types: int, int
+ Table: default.stats_nonpartitioned
+
+PREHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: Lineage: stats_nonpartitioned.key SIMPLE []
+POSTHOOK: Lineage: stats_nonpartitioned.value SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type comment
+key int
+value int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ bucketing_version 2
+ numFiles 1
+ numRows 2
+ rawDataSize 16
+ totalSize 280
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table mysource compute statistics for columns p, key
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@mysource
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table mysource compute statistics for columns p, key
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@mysource
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type comment
+key int
+value int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ bucketing_version 2
+ numFiles 1
+ numRows 2
+ rawDataSize 16
+ totalSize 280
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
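stats_sizebug.q verifies that analyzing one table's columns does not clobber another table's statistics: the two "desc formatted stats_nonpartitioned" outputs above are identical before and after the analyze on mysource. A minimal JDBC sketch of the same flow follows, assuming a HiveServer2 at a placeholder URL; it is an illustration, not part of the patch.

    // Minimal illustrative sketch: replay the stats_sizebug.q statements
    // over JDBC. The connection URL is a placeholder.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class StatsSizebugRepro {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
          stmt.execute("insert into table stats_nonpartitioned "
              + "select * from mysource where p == 100");
          // Analyzing mysource must not disturb stats_nonpartitioned's stats;
          // the q.out above shows identical 'desc formatted' output before/after.
          stmt.execute("analyze table mysource compute statistics for columns p, key");
        }
      }
    }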
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 1d57aee..abafa4b 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -105,7 +105,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
virtual void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names) = 0;
virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0;
virtual void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts) = 0;
- virtual void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) = 0;
+ virtual void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context, const int64_t txnId, const std::string& writeIdList) = 0;
virtual void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) = 0;
virtual void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part) = 0;
virtual bool partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception) = 0;
@@ -516,7 +516,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
void alter_partitions(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<Partition> & /* new_parts */) {
return;
}
- void alter_partitions_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<Partition> & /* new_parts */, const EnvironmentContext& /* environment_context */) {
+ void alter_partitions_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<Partition> & /* new_parts */, const EnvironmentContext& /* environment_context */, const int64_t /* txnId */, const std::string& /* writeIdList */) {
return;
}
void alter_partition_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */, const EnvironmentContext& /* environment_context */) {
@@ -11637,11 +11637,13 @@ class ThriftHiveMetastore_alter_partitions_presult {
};
typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset {
- _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : db_name(false), tbl_name(false), new_parts(false), environment_context(false) {}
+ _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : db_name(false), tbl_name(false), new_parts(false), environment_context(false), txnId(false), writeIdList(false) {}
bool db_name :1;
bool tbl_name :1;
bool new_parts :1;
bool environment_context :1;
+ bool txnId :1;
+ bool writeIdList :1;
} _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset;
class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
@@ -11649,7 +11651,7 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
ThriftHiveMetastore_alter_partitions_with_environment_context_args(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&);
ThriftHiveMetastore_alter_partitions_with_environment_context_args& operator=(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&);
- ThriftHiveMetastore_alter_partitions_with_environment_context_args() : db_name(), tbl_name() {
+ ThriftHiveMetastore_alter_partitions_with_environment_context_args() : db_name(), tbl_name(), txnId(0), writeIdList() {
}
virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_args() throw();
@@ -11657,6 +11659,8 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
std::string tbl_name;
std::vector<Partition> new_parts;
EnvironmentContext environment_context;
+ int64_t txnId;
+ std::string writeIdList;
_ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset __isset;
@@ -11668,6 +11672,10 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
void __set_environment_context(const EnvironmentContext& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_writeIdList(const std::string& val);
+
bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_args & rhs) const
{
if (!(db_name == rhs.db_name))
@@ -11678,6 +11686,10 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
return false;
if (!(environment_context == rhs.environment_context))
return false;
+ if (!(txnId == rhs.txnId))
+ return false;
+ if (!(writeIdList == rhs.writeIdList))
+ return false;
return true;
}
bool operator != (const ThriftHiveMetastore_alter_partitions_with_environment_context_args &rhs) const {
@@ -11701,6 +11713,8 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_pargs {
const std::string* tbl_name;
const std::vector<Partition> * new_parts;
const EnvironmentContext* environment_context;
+ const int64_t* txnId;
+ const std::string* writeIdList;
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
@@ -26472,8 +26486,8 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
void recv_alter_partitions();
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
- void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
+ void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context, const int64_t txnId, const std::string& writeIdList);
+ void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context, const int64_t txnId, const std::string& writeIdList);
void recv_alter_partitions_with_environment_context();
void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
@@ -28100,13 +28114,13 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
ifaces_[i]->alter_partitions(db_name, tbl_name, new_parts);
}
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) {
+ void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context, const int64_t txnId, const std::string& writeIdList) {
size_t sz = ifaces_.size();
size_t i = 0;
for (; i < (sz - 1); ++i) {
- ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+ ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context, txnId, writeIdList);
}
- ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+ ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context, txnId, writeIdList);
}
void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) {
@@ -29559,8 +29573,8 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
int32_t send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
void recv_alter_partitions(const int32_t seqid);
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
- int32_t send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
+ void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context, const int64_t txnId, const std::string& writeIdList);
+ int32_t send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context, const int64_t txnId, const std::string& writeIdList);
void recv_alter_partitions_with_environment_context(const int32_t seqid);
void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
int32_t send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index d45ec81..e6b7845 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -437,7 +437,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
printf("alter_partitions\n");
}
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) {
+ void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context, const int64_t txnId, const std::string& writeIdList) {
// Your implementation goes here
printf("alter_partitions_with_environment_context\n");
}
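The service change above threads the caller's transaction id and serialized write-id list through alter_partitions_with_environment_context in every generated binding, not just C++. A minimal sketch of the widened call from the generated Java Thrift client follows; the host, port, txnId value, and write-id list string are placeholders, and the partition mutation is only illustrative.

    // Minimal illustrative sketch of the new six-argument call.
    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    public class AlterPartitionsWithTxnId {
      public static void main(String[] args) throws Exception {
        TSocket transport = new TSocket("localhost", 9083);  // placeholder endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        Partition part = client.get_partition("default", "stats_part",
            Collections.singletonList("101"));
        part.getParameters().put("numRows", "2");  // illustrative change only
        // The two trailing arguments are new: the transaction id and the
        // serialized valid write-id list the caller resolved the stats under
        // (the string below is a placeholder, not a real write-id list).
        client.alter_partitions_with_environment_context(
            "default", "stats_part", Collections.singletonList(part),
            new EnvironmentContext(), 42L, "default.stats_part:5:5::");
        transport.close();
      }
    }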
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
index a0ae84e..8f46012 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
@@ -40,6 +40,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);
private static final org.apache.thrift.protocol.TField NEED_MERGE_FIELD_DESC = new org.apache.thrift.protocol.TField("needMerge", org.apache.thrift.protocol.TType.BOOL, (short)2);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -49,11 +51,15 @@ import org.slf4j.LoggerFactory;
private List<ColumnStatistics> colStats; // required
private boolean needMerge; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
COL_STATS((short)1, "colStats"),
- NEED_MERGE((short)2, "needMerge");
+ NEED_MERGE((short)2, "needMerge"),
+ TXN_ID((short)3, "txnId"),
+ VALID_WRITE_ID_LIST((short)4, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -72,6 +78,10 @@ import org.slf4j.LoggerFactory;
return COL_STATS;
case 2: // NEED_MERGE
return NEED_MERGE;
+ case 3: // TXN_ID
+ return TXN_ID;
+ case 4: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -113,8 +123,9 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __NEEDMERGE_ISSET_ID = 0;
+ private static final int __TXNID_ISSET_ID = 1;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.NEED_MERGE};
+ private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -123,11 +134,17 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class))));
tmpMap.put(_Fields.NEED_MERGE, new org.apache.thrift.meta_data.FieldMetaData("needMerge", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class, metaDataMap);
}
public SetPartitionsStatsRequest() {
+ this.txnId = -1L;
+
}
public SetPartitionsStatsRequest(
@@ -150,6 +167,10 @@ import org.slf4j.LoggerFactory;
this.colStats = __this__colStats;
}
this.needMerge = other.needMerge;
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public SetPartitionsStatsRequest deepCopy() {
@@ -161,6 +182,9 @@ import org.slf4j.LoggerFactory;
this.colStats = null;
setNeedMergeIsSet(false);
this.needMerge = false;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public int getColStatsSize() {
@@ -223,6 +247,51 @@ import org.slf4j.LoggerFactory;
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDMERGE_ISSET_ID, value);
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case COL_STATS:
@@ -241,6 +310,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -252,6 +337,12 @@ import org.slf4j.LoggerFactory;
case NEED_MERGE:
return isNeedMerge();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -267,6 +358,10 @@ import org.slf4j.LoggerFactory;
return isSetColStats();
case NEED_MERGE:
return isSetNeedMerge();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -302,6 +397,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -319,6 +432,16 @@ import org.slf4j.LoggerFactory;
if (present_needMerge)
list.add(needMerge);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -350,6 +473,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -383,6 +526,22 @@ import org.slf4j.LoggerFactory;
sb.append(this.needMerge);
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -459,6 +618,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 4: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -489,6 +664,18 @@ import org.slf4j.LoggerFactory;
oprot.writeBool(struct.needMerge);
oprot.writeFieldEnd();
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -517,10 +704,22 @@ import org.slf4j.LoggerFactory;
if (struct.isSetNeedMerge()) {
optionals.set(0);
}
- oprot.writeBitSet(optionals, 1);
+ if (struct.isSetTxnId()) {
+ optionals.set(1);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetNeedMerge()) {
oprot.writeBool(struct.needMerge);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -538,11 +737,19 @@ import org.slf4j.LoggerFactory;
}
}
struct.setColStatsIsSet(true);
- BitSet incoming = iprot.readBitSet(1);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.needMerge = iprot.readBool();
struct.setNeedMergeIsSet(true);
}
+ if (incoming.get(1)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
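
The tuple-scheme hunks above follow a pattern that recurs in every struct this patch touches: each newly appended optional field claims the next slot in the optionals BitSet, the writeBitSet width grows to match (here from 1 to 3), and the reader must consume the same width before conditionally reading each value in field order. A minimal sketch of that discipline in plain Java, using java.util.BitSet in place of Thrift's TTupleProtocol, so the helper names here are illustrative rather than Thrift API:

    import java.util.BitSet;

    public class TupleOptionalsSketch {
      // Writer side: flag which optional fields are present, then the values
      // would be emitted in the same order as the flagged bits.
      static BitSet writeOptionals(boolean hasNeedMerge, boolean hasTxnId, boolean hasWriteIds) {
        BitSet optionals = new BitSet();
        if (hasNeedMerge) optionals.set(0);
        if (hasTxnId)     optionals.set(1);  // new slot added by this patch
        if (hasWriteIds)  optionals.set(2);  // new slot added by this patch
        return optionals;                    // Thrift would writeBitSet(optionals, 3)
      }

      // Reader side: test exactly the bit positions the writer assigned.
      static void readOptionals(BitSet incoming) {
        if (incoming.get(0)) System.out.println("read needMerge (bool)");
        if (incoming.get(1)) System.out.println("read txnId (i64)");
        if (incoming.get(2)) System.out.println("read validWriteIdList (string)");
      }

      public static void main(String[] args) {
        readOptionals(writeOptionals(true, true, false));
      }
    }

Because the new bits sit after the existing ones, an old writer paired with a new reader simply leaves bits 1 and 2 clear, which is what keeps the change wire-compatible.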
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 38d4f64..d9f17cc 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -56,6 +56,9 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creationMetadata", org.apache.thrift.protocol.TType.STRUCT, (short)16);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17);
private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)18);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)19);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)20);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)21);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -81,6 +84,9 @@ import org.slf4j.LoggerFactory;
private CreationMetadata creationMetadata; // optional
private String catName; // optional
private PrincipalType ownerType; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -105,7 +111,14 @@ import org.slf4j.LoggerFactory;
*
* @see PrincipalType
*/
- OWNER_TYPE((short)18, "ownerType");
+ OWNER_TYPE((short)18, "ownerType"),
+ TXN_ID((short)19, "txnId"),
+ VALID_WRITE_ID_LIST((short)20, "validWriteIdList"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)21, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -156,6 +169,12 @@ import org.slf4j.LoggerFactory;
return CAT_NAME;
case 18: // OWNER_TYPE
return OWNER_TYPE;
+ case 19: // TXN_ID
+ return TXN_ID;
+ case 20: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ case 21: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -201,8 +220,9 @@ import org.slf4j.LoggerFactory;
private static final int __RETENTION_ISSET_ID = 2;
private static final int __TEMPORARY_ISSET_ID = 3;
private static final int __REWRITEENABLED_ISSET_ID = 4;
+ private static final int __TXNID_ISSET_ID = 5;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE};
+ private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -245,6 +265,12 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap);
}
@@ -254,6 +280,8 @@ import org.slf4j.LoggerFactory;
this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER;
+ this.txnId = -1L;
+
}
public Table(
@@ -342,6 +370,13 @@ import org.slf4j.LoggerFactory;
if (other.isSetOwnerType()) {
this.ownerType = other.ownerType;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public Table deepCopy() {
@@ -374,6 +409,10 @@ import org.slf4j.LoggerFactory;
this.catName = null;
this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ this.isStatsCompliant = null;
}
public String getTableName() {
@@ -819,6 +858,82 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case TABLE_NAME:
@@ -965,6 +1080,30 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -1024,6 +1163,15 @@ import org.slf4j.LoggerFactory;
case OWNER_TYPE:
return getOwnerType();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -1071,6 +1219,12 @@ import org.slf4j.LoggerFactory;
return isSetCatName();
case OWNER_TYPE:
return isSetOwnerType();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -1250,6 +1404,33 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -1347,6 +1528,21 @@ import org.slf4j.LoggerFactory;
if (present_ownerType)
list.add(ownerType.getValue());
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -1538,6 +1734,36 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -1693,6 +1919,32 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1914,6 +2166,30 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 19: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 20: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 21: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -2034,6 +2310,25 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -2106,7 +2401,16 @@ import org.slf4j.LoggerFactory;
if (struct.isSetOwnerType()) {
optionals.set(17);
}
- oprot.writeBitSet(optionals, 18);
+ if (struct.isSetTxnId()) {
+ optionals.set(18);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(19);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(20);
+ }
+ oprot.writeBitSet(optionals, 21);
if (struct.isSetTableName()) {
oprot.writeString(struct.tableName);
}
@@ -2174,12 +2478,21 @@ import org.slf4j.LoggerFactory;
if (struct.isSetOwnerType()) {
oprot.writeI32(struct.ownerType.getValue());
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(18);
+ BitSet incoming = iprot.readBitSet(21);
if (incoming.get(0)) {
struct.tableName = iprot.readString();
struct.setTableNameIsSet(true);
@@ -2276,6 +2589,18 @@ import org.slf4j.LoggerFactory;
struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
struct.setOwnerTypeIsSet(true);
}
+ if (incoming.get(18)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(19)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ if (incoming.get(20)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
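
For Table the new members ride along as trailing optional fields (ids 19 through 21), so an older reader skips them and an older writer never sends them. A short usage sketch against the generated accessors shown above; the write-id list literal follows Hive's ValidWriteIdList string encoding and is illustrative only:

    import org.apache.hadoop.hive.metastore.api.Table;

    public class TableTxnFieldsDemo {
      public static void main(String[] args) {
        Table t = new Table();
        // The default constructor assigns txnId = -1L without touching the
        // isset bitfield, so the field reports as unset.
        System.out.println(t.isSetTxnId());            // false

        t.setTxnId(42L);                               // also flips the isset bit
        t.setValidWriteIdList("default.tbl:5:5::");    // illustrative encoding
        System.out.println(t.isSetTxnId());            // true
        System.out.println(t.isSetValidWriteIdList()); // true

        t.unsetTxnId();                                // clears only the isset bit
        System.out.println(t.isSetTxnId());            // false
      }
    }

Note the asymmetry visible in the diff: txnId tracks set-ness through the byte bitfield (hence the new __TXNID_ISSET_ID), while the two object-typed fields use null-ness, which is why copy constructors copy txnId unconditionally but guard the others with isSet checks.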
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
index a663a64..c9b70a4 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
@@ -42,6 +42,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -53,13 +55,17 @@ import org.slf4j.LoggerFactory;
private String tblName; // required
private List<String> colNames; // required
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
DB_NAME((short)1, "dbName"),
TBL_NAME((short)2, "tblName"),
COL_NAMES((short)3, "colNames"),
- CAT_NAME((short)4, "catName");
+ CAT_NAME((short)4, "catName"),
+ TXN_ID((short)5, "txnId"),
+ VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -82,6 +88,10 @@ import org.slf4j.LoggerFactory;
return COL_NAMES;
case 4: // CAT_NAME
return CAT_NAME;
+ case 5: // TXN_ID
+ return TXN_ID;
+ case 6: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -122,7 +132,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.CAT_NAME};
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -135,11 +147,17 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsRequest.class, metaDataMap);
}
public TableStatsRequest() {
+ this.txnId = -1L;
+
}
public TableStatsRequest(
@@ -157,6 +175,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public TableStatsRequest(TableStatsRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbName()) {
this.dbName = other.dbName;
}
@@ -170,6 +189,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public TableStatsRequest deepCopy() {
@@ -182,6 +205,9 @@ import org.slf4j.LoggerFactory;
this.tblName = null;
this.colNames = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public String getDbName() {
@@ -291,6 +317,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -325,6 +396,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -342,6 +429,12 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -361,6 +454,10 @@ import org.slf4j.LoggerFactory;
return isSetColNames();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -414,6 +511,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -441,6 +556,16 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -492,6 +617,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -545,6 +690,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -576,6 +737,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -642,6 +805,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 5: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 6: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -684,6 +863,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -714,10 +905,22 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(0);
}
- oprot.writeBitSet(optionals, 1);
+ if (struct.isSetTxnId()) {
+ optionals.set(1);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -738,11 +941,19 @@ import org.slf4j.LoggerFactory;
}
}
struct.setColNamesIsSet(true);
- BitSet incoming = iprot.readBitSet(1);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(1)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
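
TableStatsRequest gains the same pair of fields, which lets a caller pin get_table_statistics_req to its transactional snapshot. A hedged client-side sketch; the three-argument constructor is assumed to take the struct's required fields (dbName, tblName, colNames), and the write-id string is again only illustrative:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

    public class StatsRequestDemo {
      public static void main(String[] args) {
        TableStatsRequest req =
            new TableStatsRequest("default", "orders", Arrays.asList("id", "amount"));
        // New optional fields: identify the caller's txn and its visible write IDs.
        req.setTxnId(1001L);
        req.setValidWriteIdList("default.orders:7:7::");
        System.out.println(req);
      }
    }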
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
index dff7d5c..4864f68 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult");
private static final org.apache.thrift.protocol.TField TABLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableStats", org.apache.thrift.protocol.TType.LIST, (short)1);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -47,10 +48,16 @@ import org.slf4j.LoggerFactory;
}
private List<ColumnStatisticsObj> tableStats; // required
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- TABLE_STATS((short)1, "tableStats");
+ TABLE_STATS((short)1, "tableStats"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -67,6 +74,8 @@ import org.slf4j.LoggerFactory;
switch(fieldId) {
case 1: // TABLE_STATS
return TABLE_STATS;
+ case 2: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -107,12 +116,15 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.TABLE_STATS, new org.apache.thrift.meta_data.FieldMetaData("tableStats", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsResult.class, metaDataMap);
}
@@ -138,6 +150,9 @@ import org.slf4j.LoggerFactory;
}
this.tableStats = __this__tableStats;
}
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public TableStatsResult deepCopy() {
@@ -147,6 +162,7 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
this.tableStats = null;
+ this.isStatsCompliant = null;
}
public int getTableStatsSize() {
@@ -187,6 +203,37 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case TABLE_STATS:
@@ -197,6 +244,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -205,6 +260,9 @@ import org.slf4j.LoggerFactory;
case TABLE_STATS:
return getTableStats();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -218,6 +276,8 @@ import org.slf4j.LoggerFactory;
switch (field) {
case TABLE_STATS:
return isSetTableStats();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -244,6 +304,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -256,6 +325,11 @@ import org.slf4j.LoggerFactory;
if (present_tableStats)
list.add(tableStats);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -277,6 +351,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -304,6 +388,16 @@ import org.slf4j.LoggerFactory;
sb.append(this.tableStats);
}
first = false;
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -370,6 +464,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -395,6 +497,13 @@ import org.slf4j.LoggerFactory;
}
oprot.writeFieldEnd();
}
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -419,6 +528,14 @@ import org.slf4j.LoggerFactory;
_iter428.write(oprot);
}
}
+ BitSet optionals = new BitSet();
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -436,6 +553,11 @@ import org.slf4j.LoggerFactory;
}
}
struct.setTableStatsIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
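
On the result side, isStatsCompliant reports whether the returned statistics are valid under the caller's snapshot; since the field is optional, it is simply absent when talking to a metastore that predates this patch. A minimal consumer-side sketch using only the accessors shown above (no assumption is made about the IsolationLevelCompliance enum constants):

    import org.apache.hadoop.hive.metastore.api.TableStatsResult;

    public class StatsResultCheck {
      static void useStats(TableStatsResult result) {
        if (result.isSetIsStatsCompliant()) {
          System.out.println("compliance: " + result.getIsStatsCompliant());
        } else {
          System.out.println("metastore did not report snapshot compliance");
        }
        System.out.println(result.getTableStats().size() + " column stat objects");
      }
    }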
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 672ebf9..c5a2d2b 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -208,7 +208,7 @@ import org.slf4j.LoggerFactory;
public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
- public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, long txnId, String writeIdList) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
@@ -626,7 +626,7 @@ import org.slf4j.LoggerFactory;
public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
- public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, long txnId, String writeIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -3401,19 +3401,21 @@ import org.slf4j.LoggerFactory;
return;
}
- public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException
+ public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, long txnId, String writeIdList) throws InvalidOperationException, MetaException, org.apache.thrift.TException
{
- send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+ send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context, txnId, writeIdList);
recv_alter_partitions_with_environment_context();
}
- public void send_alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws org.apache.thrift.TException
+ public void send_alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, long txnId, String writeIdList) throws org.apache.thrift.TException
{
alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args();
args.setDb_name(db_name);
args.setTbl_name(tbl_name);
args.setNew_parts(new_parts);
args.setEnvironment_context(environment_context);
+ args.setTxnId(txnId);
+ args.setWriteIdList(writeIdList);
sendBase("alter_partitions_with_environment_context", args);
}
@@ -9869,9 +9871,9 @@ import org.slf4j.LoggerFactory;
}
}
- public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, long txnId, String writeIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
- alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(db_name, tbl_name, new_parts, environment_context, resultHandler, this, ___protocolFactory, ___transport);
+ alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(db_name, tbl_name, new_parts, environment_context, txnId, writeIdList, resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
@@ -9881,12 +9883,16 @@ import org.slf4j.LoggerFactory;
private String tbl_name;
private List<Partition> new_parts;
private EnvironmentContext environment_context;
- public alter_partitions_with_environment_context_call(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ private long txnId;
+ private String writeIdList;
+ public alter_partitions_with_environment_context_call(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, long txnId, String writeIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
super(client, protocolFactory, transport, resultHandler, false);
this.db_name = db_name;
this.tbl_name = tbl_name;
this.new_parts = new_parts;
this.environment_context = environment_context;
+ this.txnId = txnId;
+ this.writeIdList = writeIdList;
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
@@ -9896,6 +9902,8 @@ import org.slf4j.LoggerFactory;
args.setTbl_name(tbl_name);
args.setNew_parts(new_parts);
args.setEnvironment_context(environment_context);
+ args.setTxnId(txnId);
+ args.setWriteIdList(writeIdList);
args.write(prot);
prot.writeMessageEnd();
}
@@ -16414,7 +16422,7 @@ import org.slf4j.LoggerFactory;
public alter_partitions_with_environment_context_result getResult(I iface, alter_partitions_with_environment_context_args args) throws org.apache.thrift.TException {
alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
try {
- iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context);
+ iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context, args.txnId, args.writeIdList);
} catch (InvalidOperationException o1) {
result.o1 = o1;
} catch (MetaException o2) {
@@ -25008,7 +25016,7 @@ import org.slf4j.LoggerFactory;
}
public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context,resultHandler);
+ iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context, args.txnId, args.writeIdList,resultHandler);
}
}
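
The RPC itself now threads txnId and writeIdList through the synchronous client, the async client, and both processor paths, so every ThriftHiveMetastore.Iface implementation has to accept the widened signature shown at the top of this file's diff. A sketch of an implementing stub under that assumption; the class framing and the System.out logging are placeholders, not the real HMS handler:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.thrift.TException;

    // Fragment of a ThriftHiveMetastore.Iface implementation; only the
    // widened method is shown, everything else is elided.
    public abstract class AlterPartitionsStub /* implements ThriftHiveMetastore.Iface */ {
      public void alter_partitions_with_environment_context(
          String db_name, String tbl_name, List<Partition> new_parts,
          EnvironmentContext environment_context,
          long txnId, String writeIdList)
          throws InvalidOperationException, MetaException, TException {
        // A txnId of -1 with a null writeIdList signals a non-transactional caller.
        System.out.println("alter_partitions txn=" + txnId + " writeIds=" + writeIdList);
        // ... delegate to the real alter-partitions logic ...
      }
    }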
@@ -124189,6 +124197,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.protocol.TField NEW_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("new_parts", org.apache.thrift.protocol.TType.LIST, (short)3);
private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
+ private static final org.apache.thrift.protocol.TField WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("writeIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -124200,13 +124210,17 @@ import org.slf4j.LoggerFactory;
private String tbl_name; // required
private List<Partition> new_parts; // required
private EnvironmentContext environment_context; // required
+ private long txnId; // required
+ private String writeIdList; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
DB_NAME((short)1, "db_name"),
TBL_NAME((short)2, "tbl_name"),
NEW_PARTS((short)3, "new_parts"),
- ENVIRONMENT_CONTEXT((short)4, "environment_context");
+ ENVIRONMENT_CONTEXT((short)4, "environment_context"),
+ TXN_ID((short)5, "txnId"),
+ WRITE_ID_LIST((short)6, "writeIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -124229,6 +124243,10 @@ import org.slf4j.LoggerFactory;
return NEW_PARTS;
case 4: // ENVIRONMENT_CONTEXT
return ENVIRONMENT_CONTEXT;
+ case 5: // TXN_ID
+ return TXN_ID;
+ case 6: // WRITE_ID_LIST
+ return WRITE_ID_LIST;
default:
return null;
}
@@ -124269,6 +124287,8 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -124281,6 +124301,10 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("writeIdList", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partitions_with_environment_context_args.class, metaDataMap);
}
@@ -124292,19 +124316,25 @@ import org.slf4j.LoggerFactory;
String db_name,
String tbl_name,
List<Partition> new_parts,
- EnvironmentContext environment_context)
+ EnvironmentContext environment_context,
+ long txnId,
+ String writeIdList)
{
this();
this.db_name = db_name;
this.tbl_name = tbl_name;
this.new_parts = new_parts;
this.environment_context = environment_context;
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ this.writeIdList = writeIdList;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public alter_partitions_with_environment_context_args(alter_partitions_with_environment_context_args other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDb_name()) {
this.db_name = other.db_name;
}
@@ -124321,6 +124351,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetEnvironment_context()) {
this.environment_context = new EnvironmentContext(other.environment_context);
}
+ this.txnId = other.txnId;
+ if (other.isSetWriteIdList()) {
+ this.writeIdList = other.writeIdList;
+ }
}
public alter_partitions_with_environment_context_args deepCopy() {
@@ -124333,6 +124367,9 @@ import org.slf4j.LoggerFactory;
this.tbl_name = null;
this.new_parts = null;
this.environment_context = null;
+ setTxnIdIsSet(false);
+ this.txnId = 0;
+ this.writeIdList = null;
}
public String getDb_name() {
@@ -124442,6 +124479,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getWriteIdList() {
+ return this.writeIdList;
+ }
+
+ public void setWriteIdList(String writeIdList) {
+ this.writeIdList = writeIdList;
+ }
+
+ public void unsetWriteIdList() {
+ this.writeIdList = null;
+ }
+
+ /** Returns true if field writeIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetWriteIdList() {
+ return this.writeIdList != null;
+ }
+
+ public void setWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.writeIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -124476,6 +124558,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case WRITE_ID_LIST:
+ if (value == null) {
+ unsetWriteIdList();
+ } else {
+ setWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -124493,6 +124591,12 @@ import org.slf4j.LoggerFactory;
case ENVIRONMENT_CONTEXT:
return getEnvironment_context();
+ case TXN_ID:
+ return getTxnId();
+
+ case WRITE_ID_LIST:
+ return getWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -124512,6 +124616,10 @@ import org.slf4j.LoggerFactory;
return isSetNew_parts();
case ENVIRONMENT_CONTEXT:
return isSetEnvironment_context();
+ case TXN_ID:
+ return isSetTxnId();
+ case WRITE_ID_LIST:
+ return isSetWriteIdList();
}
throw new IllegalStateException();
}
@@ -124565,6 +124673,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true;
+ boolean that_present_txnId = true;
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_writeIdList = true && this.isSetWriteIdList();
+ boolean that_present_writeIdList = true && that.isSetWriteIdList();
+ if (this_present_writeIdList || that_present_writeIdList) {
+ if (!(this_present_writeIdList && that_present_writeIdList))
+ return false;
+ if (!this.writeIdList.equals(that.writeIdList))
+ return false;
+ }
+
return true;
}
@@ -124592,6 +124718,16 @@ import org.slf4j.LoggerFactory;
if (present_environment_context)
list.add(environment_context);
+ boolean present_txnId = true;
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_writeIdList = true && (isSetWriteIdList());
+ list.add(present_writeIdList);
+ if (present_writeIdList)
+ list.add(writeIdList);
+
return list.hashCode();
}
@@ -124643,6 +124779,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetWriteIdList()).compareTo(other.isSetWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeIdList, other.writeIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -124694,6 +124850,18 @@ import org.slf4j.LoggerFactory;
sb.append(this.environment_context);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("writeIdList:");
+ if (this.writeIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.writeIdList);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -124716,6 +124884,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -124784,6 +124954,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 5: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 6: // WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.writeIdList = iprot.readString();
+ struct.setWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -124824,6 +125010,14 @@ import org.slf4j.LoggerFactory;
struct.environment_context.write(oprot);
oprot.writeFieldEnd();
}
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ if (struct.writeIdList != null) {
+ oprot.writeFieldBegin(WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.writeIdList);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -124854,7 +125048,13 @@ import org.slf4j.LoggerFactory;
if (struct.isSetEnvironment_context()) {
optionals.set(3);
}
- oprot.writeBitSet(optionals, 4);
+ if (struct.isSetTxnId()) {
+ optionals.set(4);
+ }
+ if (struct.isSetWriteIdList()) {
+ optionals.set(5);
+ }
+ oprot.writeBitSet(optionals, 6);
if (struct.isSetDb_name()) {
oprot.writeString(struct.db_name);
}
@@ -124873,12 +125073,18 @@ import org.slf4j.LoggerFactory;
if (struct.isSetEnvironment_context()) {
struct.environment_context.write(oprot);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetWriteIdList()) {
+ oprot.writeString(struct.writeIdList);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_args struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(4);
+ BitSet incoming = iprot.readBitSet(6);
if (incoming.get(0)) {
struct.db_name = iprot.readString();
struct.setDb_nameIsSet(true);
@@ -124906,6 +125112,14 @@ import org.slf4j.LoggerFactory;
struct.environment_context.read(iprot);
struct.setEnvironment_contextIsSet(true);
}
+ if (incoming.get(4)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(5)) {
+ struct.writeIdList = iprot.readString();
+ struct.setWriteIdListIsSet(true);
+ }
}
}
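
The Java changes above follow the standard Thrift conventions for optional fields: the primitive txnId is tracked through a bit in __isset_bitfield (hence the new __TXNID_ISSET_ID slot and the reset in readObject), while the object-typed writeIdList simply uses null as its unset marker, which is why setWriteIdListIsSet(false) just nulls the field. A minimal usage sketch, assuming the regenerated gen-javabean classes are on the classpath (the args struct is a static inner class of the generated ThriftHiveMetastore service class):

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

public class TxnArgsSketch {
  public static void main(String[] argv) {
    ThriftHiveMetastore.alter_partitions_with_environment_context_args args =
        new ThriftHiveMetastore.alter_partitions_with_environment_context_args();
    args.setTxnId(42L);  // also sets the __TXNID_ISSET_ID bit
    // Placeholder value: real callers pass the snapshot string produced by
    // the transaction machinery; its format is not shown in this patch.
    args.setWriteIdList("<serialized ValidWriteIdList>");
    System.out.println(args.isSetTxnId());   // true
    args.unsetTxnId();
    System.out.println(args.isSetTxnId());   // false; the long value itself is untouched
  }
}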
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index ec26cca..109bf9c 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -718,10 +718,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
* @param string $tbl_name
* @param \metastore\Partition[] $new_parts
* @param \metastore\EnvironmentContext $environment_context
+ * @param int $txnId
+ * @param string $writeIdList
* @throws \metastore\InvalidOperationException
* @throws \metastore\MetaException
*/
- public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context);
+ public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context, $txnId, $writeIdList);
/**
* @param string $db_name
* @param string $tbl_name
@@ -6394,19 +6396,21 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
return;
}
- public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context)
+ public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context, $txnId, $writeIdList)
{
- $this->send_alter_partitions_with_environment_context($db_name, $tbl_name, $new_parts, $environment_context);
+ $this->send_alter_partitions_with_environment_context($db_name, $tbl_name, $new_parts, $environment_context, $txnId, $writeIdList);
$this->recv_alter_partitions_with_environment_context();
}
- public function send_alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context)
+ public function send_alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context, $txnId, $writeIdList)
{
$args = new \metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_args();
$args->db_name = $db_name;
$args->tbl_name = $tbl_name;
$args->new_parts = $new_parts;
$args->environment_context = $environment_context;
+ $args->txnId = $txnId;
+ $args->writeIdList = $writeIdList;
$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
if ($bin_accel)
{
@@ -34329,6 +34333,14 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
* @var \metastore\EnvironmentContext
*/
public $environment_context = null;
+ /**
+ * @var int
+ */
+ public $txnId = null;
+ /**
+ * @var string
+ */
+ public $writeIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -34355,6 +34367,14 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
'type' => TType::STRUCT,
'class' => '\metastore\EnvironmentContext',
),
+ 5 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 6 => array(
+ 'var' => 'writeIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -34370,6 +34390,12 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
if (isset($vals['environment_context'])) {
$this->environment_context = $vals['environment_context'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['writeIdList'])) {
+ $this->writeIdList = $vals['writeIdList'];
+ }
}
}
@@ -34432,6 +34458,20 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
$xfer += $input->skip($ftype);
}
break;
+ case 5:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 6:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->writeIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -34480,6 +34520,16 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
$xfer += $this->environment_context->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 5);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->writeIdList !== null) {
+ $xfer += $output->writeFieldBegin('writeIdList', TType::STRING, 6);
+ $xfer += $output->writeString($this->writeIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
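
On the wire the two additions stay optional (field ids 5 and 6), so an older server simply skips them; at the source level, however, the PHP method arity grows from four parameters to six, so any hand-written caller has to be updated when the bindings are regenerated. A hedged sketch of the equivalent call from Java, assuming the regenerated Java client mirrors this signature (the Java args struct above suggests it does):

import java.util.List;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

public class AlterPartitionsSketch {
  // Hypothetical helper; client is assumed to be a connected Iface instance.
  static void alterWithTxn(ThriftHiveMetastore.Iface client, String db, String tbl,
      List<Partition> newParts, EnvironmentContext ctx,
      long txnId, String writeIdList) throws Exception {
    client.alter_partitions_with_environment_context(db, tbl, newParts, ctx,
        txnId, writeIdList);
  }
}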
[07/13] hive git commit: HIVE-19532: 03 patch
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 5c6495e..a4bba04 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -185,6 +185,16 @@ struct SchemaVersionState {
extern const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES;
+struct IsolationLevelCompliance {
+ enum type {
+ YES = 1,
+ NO = 2,
+ UNKNOWN = 3
+ };
+};
+
+extern const std::map<int, const char*> _IsolationLevelCompliance_VALUES_TO_NAMES;
+
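
IsolationLevelCompliance is the new tri-state answer to whether statistics were written under a snapshot compatible with the reader's: YES, NO, or UNKNOWN. Note that the C++ default of (IsolationLevelCompliance::type)0 used in the constructors below lies outside the declared range; it is only ever compared when the corresponding __isset flag says the field was actually set. A small sketch of the matching Java enum, whose findByValue the generated readers further down use:

import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;

public class ComplianceSketch {
  public static void main(String[] argv) {
    // findByValue maps a wire integer back to the enum, or null if unrecognized.
    System.out.println(IsolationLevelCompliance.findByValue(3)); // UNKNOWN
    System.out.println(IsolationLevelCompliance.findByValue(0)); // null
  }
}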
struct FunctionType {
enum type {
JAVA = 1
@@ -3101,7 +3111,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj)
}
typedef struct _Table__isset {
- _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true) {}
+ _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true), txnId(true), validWriteIdList(false), isStatsCompliant(false) {}
bool tableName :1;
bool dbName :1;
bool owner :1;
@@ -3120,6 +3130,9 @@ typedef struct _Table__isset {
bool creationMetadata :1;
bool catName :1;
bool ownerType :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
+ bool isStatsCompliant :1;
} _Table__isset;
class Table {
@@ -3127,7 +3140,7 @@ class Table {
Table(const Table&);
Table& operator=(const Table&);
- Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1) {
+ Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) {
ownerType = (PrincipalType::type)1;
}
@@ -3151,6 +3164,9 @@ class Table {
CreationMetadata creationMetadata;
std::string catName;
PrincipalType::type ownerType;
+ int64_t txnId;
+ std::string validWriteIdList;
+ IsolationLevelCompliance::type isStatsCompliant;
_Table__isset __isset;
@@ -3190,6 +3206,12 @@ class Table {
void __set_ownerType(const PrincipalType::type val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const Table & rhs) const
{
if (!(tableName == rhs.tableName))
@@ -3240,6 +3262,18 @@ class Table {
return false;
else if (__isset.ownerType && !(ownerType == rhs.ownerType))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const Table &rhs) const {
@@ -3263,7 +3297,7 @@ inline std::ostream& operator<<(std::ostream& out, const Table& obj)
}
typedef struct _Partition__isset {
- _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false) {}
+ _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false), txnId(true), validWriteIdList(false), isStatsCompliant(false) {}
bool values :1;
bool dbName :1;
bool tableName :1;
@@ -3273,6 +3307,9 @@ typedef struct _Partition__isset {
bool parameters :1;
bool privileges :1;
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
+ bool isStatsCompliant :1;
} _Partition__isset;
class Partition {
@@ -3280,7 +3317,7 @@ class Partition {
Partition(const Partition&);
Partition& operator=(const Partition&);
- Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName() {
+ Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName(), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~Partition() throw();
@@ -3293,6 +3330,9 @@ class Partition {
std::map<std::string, std::string> parameters;
PrincipalPrivilegeSet privileges;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
+ IsolationLevelCompliance::type isStatsCompliant;
_Partition__isset __isset;
@@ -3314,6 +3354,12 @@ class Partition {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const Partition & rhs) const
{
if (!(values == rhs.values))
@@ -3338,6 +3384,18 @@ class Partition {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const Partition &rhs) const {
@@ -3537,13 +3595,16 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionListComposingS
}
typedef struct _PartitionSpec__isset {
- _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false) {}
+ _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false), txnId(true), validWriteIdList(false), isStatsCompliant(false) {}
bool dbName :1;
bool tableName :1;
bool rootPath :1;
bool sharedSDPartitionSpec :1;
bool partitionList :1;
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
+ bool isStatsCompliant :1;
} _PartitionSpec__isset;
class PartitionSpec {
@@ -3551,7 +3612,7 @@ class PartitionSpec {
PartitionSpec(const PartitionSpec&);
PartitionSpec& operator=(const PartitionSpec&);
- PartitionSpec() : dbName(), tableName(), rootPath(), catName() {
+ PartitionSpec() : dbName(), tableName(), rootPath(), catName(), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~PartitionSpec() throw();
@@ -3561,6 +3622,9 @@ class PartitionSpec {
PartitionSpecWithSharedSD sharedSDPartitionSpec;
PartitionListComposingSpec partitionList;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
+ IsolationLevelCompliance::type isStatsCompliant;
_PartitionSpec__isset __isset;
@@ -3576,6 +3640,12 @@ class PartitionSpec {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const PartitionSpec & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -3596,6 +3666,18 @@ class PartitionSpec {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const PartitionSpec &rhs) const {
@@ -4404,29 +4486,58 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatisticsDesc& o
return out;
}
+typedef struct _ColumnStatistics__isset {
+ _ColumnStatistics__isset() : txnId(true), validWriteIdList(false), isStatsCompliant(false) {}
+ bool txnId :1;
+ bool validWriteIdList :1;
+ bool isStatsCompliant :1;
+} _ColumnStatistics__isset;
class ColumnStatistics {
public:
ColumnStatistics(const ColumnStatistics&);
ColumnStatistics& operator=(const ColumnStatistics&);
- ColumnStatistics() {
+ ColumnStatistics() : txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~ColumnStatistics() throw();
ColumnStatisticsDesc statsDesc;
std::vector<ColumnStatisticsObj> statsObj;
+ int64_t txnId;
+ std::string validWriteIdList;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _ColumnStatistics__isset __isset;
void __set_statsDesc(const ColumnStatisticsDesc& val);
void __set_statsObj(const std::vector<ColumnStatisticsObj> & val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const ColumnStatistics & rhs) const
{
if (!(statsDesc == rhs.statsDesc))
return false;
if (!(statsObj == rhs.statsObj))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const ColumnStatistics &rhs) const {
@@ -4449,29 +4560,42 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatistics& obj)
return out;
}
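
The shape of the change is consistent across this header: request-side structs (SetPartitionsStatsRequest, TableStatsRequest, PartitionsStatsRequest, AddPartitionsRequest, GetTableRequest) and the data-carrying Table, Partition, PartitionSpec, and ColumnStatistics gain the txnId/validWriteIdList pair so a caller can identify its snapshot, while result-side structs (AggrStats below, TableStatsResult, PartitionsStatsResult, AddPartitionsResult, GetTableResult) gain only isStatsCompliant, the server's verdict on whether the returned statistics honor that snapshot.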
+typedef struct _AggrStats__isset {
+ _AggrStats__isset() : isStatsCompliant(false) {}
+ bool isStatsCompliant :1;
+} _AggrStats__isset;
class AggrStats {
public:
AggrStats(const AggrStats&);
AggrStats& operator=(const AggrStats&);
- AggrStats() : partsFound(0) {
+ AggrStats() : partsFound(0), isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~AggrStats() throw();
std::vector<ColumnStatisticsObj> colStats;
int64_t partsFound;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _AggrStats__isset __isset;
void __set_colStats(const std::vector<ColumnStatisticsObj> & val);
void __set_partsFound(const int64_t val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const AggrStats & rhs) const
{
if (!(colStats == rhs.colStats))
return false;
if (!(partsFound == rhs.partsFound))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const AggrStats &rhs) const {
@@ -4495,8 +4619,10 @@ inline std::ostream& operator<<(std::ostream& out, const AggrStats& obj)
}
typedef struct _SetPartitionsStatsRequest__isset {
- _SetPartitionsStatsRequest__isset() : needMerge(false) {}
+ _SetPartitionsStatsRequest__isset() : needMerge(false), txnId(true), validWriteIdList(false) {}
bool needMerge :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _SetPartitionsStatsRequest__isset;
class SetPartitionsStatsRequest {
@@ -4504,12 +4630,14 @@ class SetPartitionsStatsRequest {
SetPartitionsStatsRequest(const SetPartitionsStatsRequest&);
SetPartitionsStatsRequest& operator=(const SetPartitionsStatsRequest&);
- SetPartitionsStatsRequest() : needMerge(0) {
+ SetPartitionsStatsRequest() : needMerge(0), txnId(-1LL), validWriteIdList() {
}
virtual ~SetPartitionsStatsRequest() throw();
std::vector<ColumnStatistics> colStats;
bool needMerge;
+ int64_t txnId;
+ std::string validWriteIdList;
_SetPartitionsStatsRequest__isset __isset;
@@ -4517,6 +4645,10 @@ class SetPartitionsStatsRequest {
void __set_needMerge(const bool val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const SetPartitionsStatsRequest & rhs) const
{
if (!(colStats == rhs.colStats))
@@ -4525,6 +4657,14 @@ class SetPartitionsStatsRequest {
return false;
else if (__isset.needMerge && !(needMerge == rhs.needMerge))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const SetPartitionsStatsRequest &rhs) const {
@@ -5642,24 +5782,37 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsByExprRequest
return out;
}
+typedef struct _TableStatsResult__isset {
+ _TableStatsResult__isset() : isStatsCompliant(false) {}
+ bool isStatsCompliant :1;
+} _TableStatsResult__isset;
class TableStatsResult {
public:
TableStatsResult(const TableStatsResult&);
TableStatsResult& operator=(const TableStatsResult&);
- TableStatsResult() {
+ TableStatsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~TableStatsResult() throw();
std::vector<ColumnStatisticsObj> tableStats;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _TableStatsResult__isset __isset;
void __set_tableStats(const std::vector<ColumnStatisticsObj> & val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const TableStatsResult & rhs) const
{
if (!(tableStats == rhs.tableStats))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const TableStatsResult &rhs) const {
@@ -5682,24 +5835,37 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsResult& obj)
return out;
}
+typedef struct _PartitionsStatsResult__isset {
+ _PartitionsStatsResult__isset() : isStatsCompliant(false) {}
+ bool isStatsCompliant :1;
+} _PartitionsStatsResult__isset;
class PartitionsStatsResult {
public:
PartitionsStatsResult(const PartitionsStatsResult&);
PartitionsStatsResult& operator=(const PartitionsStatsResult&);
- PartitionsStatsResult() {
+ PartitionsStatsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~PartitionsStatsResult() throw();
std::map<std::string, std::vector<ColumnStatisticsObj> > partStats;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _PartitionsStatsResult__isset __isset;
void __set_partStats(const std::map<std::string, std::vector<ColumnStatisticsObj> > & val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const PartitionsStatsResult & rhs) const
{
if (!(partStats == rhs.partStats))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const PartitionsStatsResult &rhs) const {
@@ -5723,8 +5889,10 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsResult&
}
typedef struct _TableStatsRequest__isset {
- _TableStatsRequest__isset() : catName(false) {}
+ _TableStatsRequest__isset() : catName(false), txnId(true), validWriteIdList(false) {}
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _TableStatsRequest__isset;
class TableStatsRequest {
@@ -5732,7 +5900,7 @@ class TableStatsRequest {
TableStatsRequest(const TableStatsRequest&);
TableStatsRequest& operator=(const TableStatsRequest&);
- TableStatsRequest() : dbName(), tblName(), catName() {
+ TableStatsRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() {
}
virtual ~TableStatsRequest() throw();
@@ -5740,6 +5908,8 @@ class TableStatsRequest {
std::string tblName;
std::vector<std::string> colNames;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
_TableStatsRequest__isset __isset;
@@ -5751,6 +5921,10 @@ class TableStatsRequest {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const TableStatsRequest & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -5763,6 +5937,14 @@ class TableStatsRequest {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const TableStatsRequest &rhs) const {
@@ -5786,8 +5968,10 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsRequest& obj)
}
typedef struct _PartitionsStatsRequest__isset {
- _PartitionsStatsRequest__isset() : catName(false) {}
+ _PartitionsStatsRequest__isset() : catName(false), txnId(true), validWriteIdList(false) {}
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _PartitionsStatsRequest__isset;
class PartitionsStatsRequest {
@@ -5795,7 +5979,7 @@ class PartitionsStatsRequest {
PartitionsStatsRequest(const PartitionsStatsRequest&);
PartitionsStatsRequest& operator=(const PartitionsStatsRequest&);
- PartitionsStatsRequest() : dbName(), tblName(), catName() {
+ PartitionsStatsRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() {
}
virtual ~PartitionsStatsRequest() throw();
@@ -5804,6 +5988,8 @@ class PartitionsStatsRequest {
std::vector<std::string> colNames;
std::vector<std::string> partNames;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
_PartitionsStatsRequest__isset __isset;
@@ -5817,6 +6003,10 @@ class PartitionsStatsRequest {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const PartitionsStatsRequest & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -5831,6 +6021,14 @@ class PartitionsStatsRequest {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const PartitionsStatsRequest &rhs) const {
@@ -5854,8 +6052,9 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsRequest&
}
typedef struct _AddPartitionsResult__isset {
- _AddPartitionsResult__isset() : partitions(false) {}
+ _AddPartitionsResult__isset() : partitions(false), isStatsCompliant(false) {}
bool partitions :1;
+ bool isStatsCompliant :1;
} _AddPartitionsResult__isset;
class AddPartitionsResult {
@@ -5863,22 +6062,29 @@ class AddPartitionsResult {
AddPartitionsResult(const AddPartitionsResult&);
AddPartitionsResult& operator=(const AddPartitionsResult&);
- AddPartitionsResult() {
+ AddPartitionsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~AddPartitionsResult() throw();
std::vector<Partition> partitions;
+ IsolationLevelCompliance::type isStatsCompliant;
_AddPartitionsResult__isset __isset;
void __set_partitions(const std::vector<Partition> & val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const AddPartitionsResult & rhs) const
{
if (__isset.partitions != rhs.__isset.partitions)
return false;
else if (__isset.partitions && !(partitions == rhs.partitions))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const AddPartitionsResult &rhs) const {
@@ -5902,9 +6108,11 @@ inline std::ostream& operator<<(std::ostream& out, const AddPartitionsResult& ob
}
typedef struct _AddPartitionsRequest__isset {
- _AddPartitionsRequest__isset() : needResult(true), catName(false) {}
+ _AddPartitionsRequest__isset() : needResult(true), catName(false), txnId(true), validWriteIdList(false) {}
bool needResult :1;
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _AddPartitionsRequest__isset;
class AddPartitionsRequest {
@@ -5912,7 +6120,7 @@ class AddPartitionsRequest {
AddPartitionsRequest(const AddPartitionsRequest&);
AddPartitionsRequest& operator=(const AddPartitionsRequest&);
- AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName() {
+ AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName(), txnId(-1LL), validWriteIdList() {
}
virtual ~AddPartitionsRequest() throw();
@@ -5922,6 +6130,8 @@ class AddPartitionsRequest {
bool ifNotExists;
bool needResult;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
_AddPartitionsRequest__isset __isset;
@@ -5937,6 +6147,10 @@ class AddPartitionsRequest {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const AddPartitionsRequest & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -5955,6 +6169,14 @@ class AddPartitionsRequest {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const AddPartitionsRequest &rhs) const {
@@ -9873,9 +10095,11 @@ inline std::ostream& operator<<(std::ostream& out, const ClientCapabilities& obj
}
typedef struct _GetTableRequest__isset {
- _GetTableRequest__isset() : capabilities(false), catName(false) {}
+ _GetTableRequest__isset() : capabilities(false), catName(false), txnId(true), validWriteIdList(false) {}
bool capabilities :1;
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _GetTableRequest__isset;
class GetTableRequest {
@@ -9883,7 +10107,7 @@ class GetTableRequest {
GetTableRequest(const GetTableRequest&);
GetTableRequest& operator=(const GetTableRequest&);
- GetTableRequest() : dbName(), tblName(), catName() {
+ GetTableRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() {
}
virtual ~GetTableRequest() throw();
@@ -9891,6 +10115,8 @@ class GetTableRequest {
std::string tblName;
ClientCapabilities capabilities;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
_GetTableRequest__isset __isset;
@@ -9902,6 +10128,10 @@ class GetTableRequest {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const GetTableRequest & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -9916,6 +10146,14 @@ class GetTableRequest {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const GetTableRequest &rhs) const {
@@ -9938,24 +10176,37 @@ inline std::ostream& operator<<(std::ostream& out, const GetTableRequest& obj)
return out;
}
+typedef struct _GetTableResult__isset {
+ _GetTableResult__isset() : isStatsCompliant(false) {}
+ bool isStatsCompliant :1;
+} _GetTableResult__isset;
class GetTableResult {
public:
GetTableResult(const GetTableResult&);
GetTableResult& operator=(const GetTableResult&);
- GetTableResult() {
+ GetTableResult() : isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~GetTableResult() throw();
Table table;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _GetTableResult__isset __isset;
void __set_table(const Table& val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const GetTableResult & rhs) const
{
if (!(table == rhs.table))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const GetTableResult &rhs) const {
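
One C++ detail worth noticing in every _X__isset initializer above: txnId starts as true while the field itself is constructed as -1LL, so a default-constructed object reports "txnId present, value -1" as the no-transaction sentinel, whereas validWriteIdList and isStatsCompliant start unset. The Java bindings use the same -1 sentinel but leave the isset bit clear, as the AddPartitionsRequest default constructor in the next file shows:

import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;

public class SentinelSketch {
  public static void main(String[] argv) {
    AddPartitionsRequest req = new AddPartitionsRequest();
    System.out.println(req.getTxnId());    // -1, the "no transaction" sentinel
    System.out.println(req.isSetTxnId());  // false until setTxnId() is called
  }
}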
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
index dd3a127..56e5043 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
@@ -44,6 +44,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4);
private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -57,6 +59,8 @@ import org.slf4j.LoggerFactory;
private boolean ifNotExists; // required
private boolean needResult; // optional
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -65,7 +69,9 @@ import org.slf4j.LoggerFactory;
PARTS((short)3, "parts"),
IF_NOT_EXISTS((short)4, "ifNotExists"),
NEED_RESULT((short)5, "needResult"),
- CAT_NAME((short)6, "catName");
+ CAT_NAME((short)6, "catName"),
+ TXN_ID((short)7, "txnId"),
+ VALID_WRITE_ID_LIST((short)8, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -92,6 +98,10 @@ import org.slf4j.LoggerFactory;
return NEED_RESULT;
case 6: // CAT_NAME
return CAT_NAME;
+ case 7: // TXN_ID
+ return TXN_ID;
+ case 8: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -134,8 +144,9 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __IFNOTEXISTS_ISSET_ID = 0;
private static final int __NEEDRESULT_ISSET_ID = 1;
+ private static final int __TXNID_ISSET_ID = 2;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME};
+ private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -152,6 +163,10 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsRequest.class, metaDataMap);
}
@@ -159,6 +174,8 @@ import org.slf4j.LoggerFactory;
public AddPartitionsRequest() {
this.needResult = true;
+ this.txnId = -1L;
+
}
public AddPartitionsRequest(
@@ -198,6 +215,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public AddPartitionsRequest deepCopy() {
@@ -214,6 +235,9 @@ import org.slf4j.LoggerFactory;
this.needResult = true;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public String getDbName() {
@@ -367,6 +391,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -417,6 +486,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -440,6 +525,12 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -463,6 +554,10 @@ import org.slf4j.LoggerFactory;
return isSetNeedResult();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -534,6 +629,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -571,6 +684,16 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -642,6 +765,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -705,6 +848,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -825,6 +984,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 7: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 8: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -875,6 +1050,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -909,13 +1096,25 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetTxnId()) {
+ optionals.set(2);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(3);
+ }
+ oprot.writeBitSet(optionals, 4);
if (struct.isSetNeedResult()) {
oprot.writeBool(struct.needResult);
}
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -939,7 +1138,7 @@ import org.slf4j.LoggerFactory;
struct.setPartsIsSet(true);
struct.ifNotExists = iprot.readBool();
struct.setIfNotExistsIsSet(true);
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
struct.needResult = iprot.readBool();
struct.setNeedResultIsSet(true);
@@ -948,6 +1147,14 @@ import org.slf4j.LoggerFactory;
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(2)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(3)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
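
The TupleScheme hunks are the compatibility-sensitive part of this regeneration: TTupleProtocol writes optionals as a positional bitset (here grown from 2 to 4 bits), so both peers must be generated from the same IDL; an older reader would not know to consume the txnId and validWriteIdList payloads that follow the bitset. The StandardScheme, by contrast, tags every field with its id (7 and 8 here), and an old reader simply skips unknown fields. An illustration of the positional encoding using java.util.BitSet, the same type the generated code passes to writeBitSet:

import java.util.BitSet;

public class BitsetSketch {
  public static void main(String[] argv) {
    BitSet optionals = new BitSet();
    optionals.set(0);  // needResult present
    optionals.set(2);  // txnId present
    // writeBitSet(optionals, 4) encodes four presence bits (1,0,1,0) packed
    // into bytes; only the fields whose bits are set have payloads after it.
    System.out.println(optionals);  // {0, 2}
  }
}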
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
index fe41b8c..03d1fc4 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsResult");
private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -47,10 +48,16 @@ import org.slf4j.LoggerFactory;
}
private List<Partition> partitions; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- PARTITIONS((short)1, "partitions");
+ PARTITIONS((short)1, "partitions"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -67,6 +74,8 @@ import org.slf4j.LoggerFactory;
switch(fieldId) {
case 1: // PARTITIONS
return PARTITIONS;
+ case 2: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -107,13 +116,15 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.PARTITIONS};
+ private static final _Fields optionals[] = {_Fields.PARTITIONS,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsResult.class, metaDataMap);
}
@@ -132,6 +143,9 @@ import org.slf4j.LoggerFactory;
}
this.partitions = __this__partitions;
}
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public AddPartitionsResult deepCopy() {
@@ -141,6 +155,7 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
this.partitions = null;
+ this.isStatsCompliant = null;
}
public int getPartitionsSize() {
@@ -181,6 +196,37 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case PARTITIONS:
@@ -191,6 +237,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -199,6 +253,9 @@ import org.slf4j.LoggerFactory;
case PARTITIONS:
return getPartitions();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -212,6 +269,8 @@ import org.slf4j.LoggerFactory;
switch (field) {
case PARTITIONS:
return isSetPartitions();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -238,6 +297,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -250,6 +318,11 @@ import org.slf4j.LoggerFactory;
if (present_partitions)
list.add(partitions);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -271,6 +344,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -300,6 +383,16 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -362,6 +455,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -389,6 +490,13 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -410,7 +518,10 @@ import org.slf4j.LoggerFactory;
if (struct.isSetPartitions()) {
optionals.set(0);
}
- oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(1);
+ }
+ oprot.writeBitSet(optionals, 2);
if (struct.isSetPartitions()) {
{
oprot.writeI32(struct.partitions.size());
@@ -420,12 +531,15 @@ import org.slf4j.LoggerFactory;
}
}
}
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(1);
+ BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
@@ -440,6 +554,10 @@ import org.slf4j.LoggerFactory;
}
struct.setPartitionsIsSet(true);
}
+ if (incoming.get(1)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
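
The tuple-scheme hunks above reduce to one contract: the optionals bitset is positional, so writer and reader must agree on its width, which grows here from 1 to 2. A minimal, self-contained Java sketch of that contract, using plain java.util.BitSet with the Thrift protocol objects elided (class name and field order are illustrative assumptions):

    import java.util.BitSet;

    public class BitsetWidthSketch {
      public static void main(String[] args) {
        // Writer side: one bit per optional field, in field-id order.
        BitSet optionals = new BitSet();
        boolean hasPartitions = true;        // optional field 1 is set
        boolean hasIsStatsCompliant = true;  // new optional field 2 is set
        if (hasPartitions) optionals.set(0);
        if (hasIsStatsCompliant) optionals.set(1);
        // writeBitSet(optionals, 2) would emit ceil(2/8) = 1 byte on the wire.

        // Reader side: must request the same width, then test each bit.
        BitSet incoming = (BitSet) optionals.clone(); // stands in for readBitSet(2)
        System.out.println("partitions present:       " + incoming.get(0));
        System.out.println("isStatsCompliant present: " + incoming.get(1));
      }
    }

Because the width and the trailing optional values are positional, both ends must be generated from the same IDL; the tuple scheme trades Thrift's usual field-tag tolerance for compactness.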
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
index fff212d..fea95c3 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
@@ -40,6 +40,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);
private static final org.apache.thrift.protocol.TField PARTS_FOUND_FIELD_DESC = new org.apache.thrift.protocol.TField("partsFound", org.apache.thrift.protocol.TType.I64, (short)2);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)3);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -49,11 +50,17 @@ import org.slf4j.LoggerFactory;
private List<ColumnStatisticsObj> colStats; // required
private long partsFound; // required
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
COL_STATS((short)1, "colStats"),
- PARTS_FOUND((short)2, "partsFound");
+ PARTS_FOUND((short)2, "partsFound"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)3, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -72,6 +79,8 @@ import org.slf4j.LoggerFactory;
return COL_STATS;
case 2: // PARTS_FOUND
return PARTS_FOUND;
+ case 3: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -114,6 +123,7 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __PARTSFOUND_ISSET_ID = 0;
private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -122,6 +132,8 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
tmpMap.put(_Fields.PARTS_FOUND, new org.apache.thrift.meta_data.FieldMetaData("partsFound", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AggrStats.class, metaDataMap);
}
@@ -152,6 +164,9 @@ import org.slf4j.LoggerFactory;
this.colStats = __this__colStats;
}
this.partsFound = other.partsFound;
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public AggrStats deepCopy() {
@@ -163,6 +178,7 @@ import org.slf4j.LoggerFactory;
this.colStats = null;
setPartsFoundIsSet(false);
this.partsFound = 0;
+ this.isStatsCompliant = null;
}
public int getColStatsSize() {
@@ -225,6 +241,37 @@ import org.slf4j.LoggerFactory;
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARTSFOUND_ISSET_ID, value);
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case COL_STATS:
@@ -243,6 +290,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -254,6 +309,9 @@ import org.slf4j.LoggerFactory;
case PARTS_FOUND:
return getPartsFound();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -269,6 +327,8 @@ import org.slf4j.LoggerFactory;
return isSetColStats();
case PARTS_FOUND:
return isSetPartsFound();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -304,6 +364,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -321,6 +390,11 @@ import org.slf4j.LoggerFactory;
if (present_partsFound)
list.add(partsFound);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -352,6 +426,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -383,6 +467,16 @@ import org.slf4j.LoggerFactory;
sb.append("partsFound:");
sb.append(this.partsFound);
first = false;
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -463,6 +557,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -491,6 +593,13 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(PARTS_FOUND_FIELD_DESC);
oprot.writeI64(struct.partsFound);
oprot.writeFieldEnd();
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -516,6 +625,14 @@ import org.slf4j.LoggerFactory;
}
}
oprot.writeI64(struct.partsFound);
+ BitSet optionals = new BitSet();
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -535,6 +652,11 @@ import org.slf4j.LoggerFactory;
struct.setColStatsIsSet(true);
struct.partsFound = iprot.readI64();
struct.setPartsFoundIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
index 6ce7214..9fd43cc 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
@@ -40,6 +40,9 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1);
private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("statsObj", org.apache.thrift.protocol.TType.LIST, (short)2);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)5);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -49,11 +52,21 @@ import org.slf4j.LoggerFactory;
private ColumnStatisticsDesc statsDesc; // required
private List<ColumnStatisticsObj> statsObj; // required
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
STATS_DESC((short)1, "statsDesc"),
- STATS_OBJ((short)2, "statsObj");
+ STATS_OBJ((short)2, "statsObj"),
+ TXN_ID((short)3, "txnId"),
+ VALID_WRITE_ID_LIST((short)4, "validWriteIdList"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)5, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -72,6 +85,12 @@ import org.slf4j.LoggerFactory;
return STATS_DESC;
case 2: // STATS_OBJ
return STATS_OBJ;
+ case 3: // TXN_ID
+ return TXN_ID;
+ case 4: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ case 5: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -112,6 +131,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -120,11 +142,19 @@ import org.slf4j.LoggerFactory;
tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("statsObj", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatistics.class, metaDataMap);
}
public ColumnStatistics() {
+ this.txnId = -1L;
+
}
public ColumnStatistics(
@@ -140,6 +170,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public ColumnStatistics(ColumnStatistics other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetStatsDesc()) {
this.statsDesc = new ColumnStatisticsDesc(other.statsDesc);
}
@@ -150,6 +181,13 @@ import org.slf4j.LoggerFactory;
}
this.statsObj = __this__statsObj;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public ColumnStatistics deepCopy() {
@@ -160,6 +198,10 @@ import org.slf4j.LoggerFactory;
public void clear() {
this.statsDesc = null;
this.statsObj = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ this.isStatsCompliant = null;
}
public ColumnStatisticsDesc getStatsDesc() {
@@ -223,6 +265,82 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case STATS_DESC:
@@ -241,6 +359,30 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -252,6 +394,15 @@ import org.slf4j.LoggerFactory;
case STATS_OBJ:
return getStatsObj();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -267,6 +418,12 @@ import org.slf4j.LoggerFactory;
return isSetStatsDesc();
case STATS_OBJ:
return isSetStatsObj();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -302,6 +459,33 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -319,6 +503,21 @@ import org.slf4j.LoggerFactory;
if (present_statsObj)
list.add(statsObj);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -350,6 +549,36 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -385,6 +614,32 @@ import org.slf4j.LoggerFactory;
sb.append(this.statsObj);
}
first = false;
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -415,6 +670,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -467,6 +724,30 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 4: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 5: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -497,6 +778,25 @@ import org.slf4j.LoggerFactory;
}
oprot.writeFieldEnd();
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -522,6 +822,26 @@ import org.slf4j.LoggerFactory;
_iter272.write(oprot);
}
}
+ BitSet optionals = new BitSet();
+ if (struct.isSetTxnId()) {
+ optionals.set(0);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(1);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -542,6 +862,19 @@ import org.slf4j.LoggerFactory;
}
}
struct.setStatsObjIsSet(true);
+ BitSet incoming = iprot.readBitSet(3);
+ if (incoming.get(0)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
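
Taken together, the three new optional fields on ColumnStatistics can be exercised as below; a hedged sketch using the generated classes (the db/table names are hypothetical, and the write-id-list string format is assumed to follow ValidReaderWriteIdList's serialization):

    import java.util.ArrayList;

    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
    import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;

    public class ColumnStatisticsFieldsSketch {
      public static void main(String[] args) {
        ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, "db1", "acid_tbl");
        ColumnStatistics cs = new ColumnStatistics(desc, new ArrayList<>());

        cs.setTxnId(42L);                                     // field 3; defaults to -1L
        cs.setValidWriteIdList("db1.acid_tbl:42:9223372036854775807::"); // field 4
        cs.setIsStatsCompliant(IsolationLevelCompliance.YES); // field 5

        // The generated isSet/unset pairs behave like any Thrift bean:
        System.out.println(cs.isSetTxnId());   // true
        cs.unsetTxnId();
        System.out.println(cs.isSetTxnId());   // false
      }
    }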
[02/13] hive git commit: HIVE-19532: 03 patch
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index e99f888..7ae2cc1 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -80,140 +80,29 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.DatabaseName;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.common.*;
import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
-import org.apache.hadoop.hive.metastore.api.SchemaType;
-import org.apache.hadoop.hive.metastore.api.SchemaValidation;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SerdeType;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
import org.apache.hadoop.hive.metastore.metrics.Metrics;
import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
-import org.apache.hadoop.hive.metastore.model.MCatalog;
-import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
-import org.apache.hadoop.hive.metastore.model.MConstraint;
-import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
-import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
-import org.apache.hadoop.hive.metastore.model.MDatabase;
-import org.apache.hadoop.hive.metastore.model.MDelegationToken;
-import org.apache.hadoop.hive.metastore.model.MFieldSchema;
-import org.apache.hadoop.hive.metastore.model.MFunction;
-import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
-import org.apache.hadoop.hive.metastore.model.MISchema;
-import org.apache.hadoop.hive.metastore.model.MMasterKey;
-import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
-import org.apache.hadoop.hive.metastore.model.MNotificationLog;
-import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
-import org.apache.hadoop.hive.metastore.model.MOrder;
-import org.apache.hadoop.hive.metastore.model.MPartition;
-import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
-import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
-import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
-import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
-import org.apache.hadoop.hive.metastore.model.MResourceUri;
-import org.apache.hadoop.hive.metastore.model.MRole;
-import org.apache.hadoop.hive.metastore.model.MRoleMap;
-import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
-import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
-import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
-import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
-import org.apache.hadoop.hive.metastore.model.MStringList;
-import org.apache.hadoop.hive.metastore.model.MTable;
-import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
-import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
-import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
-import org.apache.hadoop.hive.metastore.model.MType;
-import org.apache.hadoop.hive.metastore.model.MVersionTable;
-import org.apache.hadoop.hive.metastore.model.MWMMapping;
+import org.apache.hadoop.hive.metastore.model.*;
import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
-import org.apache.hadoop.hive.metastore.model.MWMPool;
-import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
-import org.apache.hadoop.hive.metastore.model.MWMTrigger;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.JavaUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+import org.apache.hive.common.util.TxnIdUtils;
import org.apache.thrift.TException;
import org.datanucleus.AbstractNucleusContext;
import org.datanucleus.ClassLoaderResolver;
@@ -1303,10 +1192,16 @@ public class ObjectStore implements RawStore, Configurable {
@Override
public void createTable(Table tbl) throws InvalidObjectException, MetaException {
boolean commited = false;
+ MTable mtbl = null;
+
try {
openTransaction();
- MTable mtbl = convertToMTable(tbl);
+ mtbl = convertToMTable(tbl);
+ if (TxnUtils.isTransactionalTable(tbl)) {
+ mtbl.setTxnId(tbl.getTxnId());
+ mtbl.setWriteIdList(tbl.getValidWriteIdList());
+ }
pm.makePersistent(mtbl);
if (tbl.getCreationMetadata() != null) {
@@ -1417,6 +1312,8 @@ public class ObjectStore implements RawStore, Configurable {
TableName.getQualified(catName, dbName, tableName));
}
+ Table table = convertToTable(tbl);
+
List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
catName, dbName, tableName, null);
if (CollectionUtils.isNotEmpty(tabConstraints)) {
@@ -1515,17 +1412,47 @@ public class ObjectStore implements RawStore, Configurable {
return mConstraints;
}
+ private static String getFullyQualifiedTableName(String dbName, String tblName) {
+ // Quote each identifier exactly once, e.g. ("db1", "tbl") -> "db1"."tbl".
+ return ((dbName == null || dbName.isEmpty()) ? "" : "\"" + dbName + "\".")
+ + "\"" + tblName + "\"";
+ }
+
@Override
- public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+ public Table getTable(String catName, String dbName, String tableName)
+ throws MetaException {
+ return getTable(catName, dbName, tableName, -1, null);
+ }
+
+ @Override
+ public Table getTable(String catName, String dbName, String tableName,
+ long txnId, String writeIdList)
+ throws MetaException {
boolean commited = false;
Table tbl = null;
try {
openTransaction();
- tbl = convertToTable(getMTable(catName, dbName, tableName));
+ MTable mtable = getMTable(catName, dbName, tableName);
+ tbl = convertToTable(mtable);
// Retrieve creation metadata if needed
- if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
- tbl.setCreationMetadata(
- convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
+ if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
+ tbl.setCreationMetadata(
+ convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
+ }
+
+ // For a transactional, non-partitioned table, check whether the current
+ // table statistics in the metastore comply with the client query's
+ // snapshot isolation.
+ // Note: a partitioned table keeps its stats and table snapshot in MPartition.
+ if (writeIdList != null) {
+ if (tbl != null
+ && TxnUtils.isTransactionalTable(tbl)
+ && tbl.getPartitionKeysSize() == 0) {
+ if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList)) {
+ tbl.setIsStatsCompliant(IsolationLevelCompliance.YES);
+ } else {
+ tbl.setIsStatsCompliant(IsolationLevelCompliance.NO);
+ // Do not persist the following state since it is query-specific (not global).
+ StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
+ }
+ }
}
commited = commitTransaction();
} finally {
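
In caller terms, the block above behaves as sketched below; a hedged illustration only, with the catalog, db, and table names plus the surrounding helper method invented for this example:

    // Hypothetical caller of the new snapshot-aware getTable overload.
    static boolean tableStatsUsable(RawStore store, long queryTxnId, String queryWriteIds)
        throws MetaException {
      Table t = store.getTable("hive", "db1", "acid_tbl", queryTxnId, queryWriteIds);
      // For a transactional, non-partitioned table the store has already flipped
      // the in-memory COLUMN_STATS_ACCURATE basic state to FALSE when non-compliant.
      return t != null && t.getIsStatsCompliant() != IsolationLevelCompliance.NO;
    }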
@@ -2312,6 +2239,7 @@ public class ObjectStore implements RawStore, Configurable {
}
@Override
+ // TODO: Make sure each Partition carries its validWriteIdList.
public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
throws InvalidObjectException, MetaException {
boolean success = false;
@@ -2324,13 +2252,32 @@ public class ObjectStore implements RawStore, Configurable {
tabGrants = this.listAllTableGrants(catName, dbName, tblName);
tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName);
}
+
List<Object> toPersist = new ArrayList<>();
+ boolean firstSpan = true;
+ boolean transactional = false;
for (Partition part : parts) {
if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
throw new MetaException("Partition does not belong to target table "
+ dbName + "." + tblName + ": " + part);
}
- MPartition mpart = convertToMPart(part, table, true);
+ MPartition mpart = convertToMPart(part, true);
+ if (firstSpan) {
+ if (part.getValidWriteIdList() != null &&
+ TxnUtils.isTransactionalTable(table.getParameters())) {
+ transactional = true;
+ }
+ firstSpan = false;
+ }
+ if (transactional) {
+ // TODO: change the following two lines when AddPartitionRequest is changed.
+ // Check for a concurrent INSERT and, if detected, set the basic-stats flag to false.
+ if (!isCurrentStatsValidForTheQuery(mpart, part.getTxnId(), part.getValidWriteIdList())) {
+ StatsSetupConst.setBasicStatsState(mpart.getParameters(), StatsSetupConst.FALSE);
+ }
+ mpart.setTxnId(part.getTxnId());
+ mpart.setWriteIdList(part.getValidWriteIdList());
+ }
toPersist.add(mpart);
int now = (int)(System.currentTimeMillis()/1000);
if (tabGrants != null) {
@@ -2378,6 +2325,7 @@ public class ObjectStore implements RawStore, Configurable {
}
@Override
+ // TODO: Check that PartitionSpecProxy carries the validWriteIdList.
public boolean addPartitions(String catName, String dbName, String tblName,
PartitionSpecProxy partitionSpec, boolean ifNotExists)
throws InvalidObjectException, MetaException {
@@ -2400,13 +2348,30 @@ public class ObjectStore implements RawStore, Configurable {
PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
int now = (int)(System.currentTimeMillis()/1000);
-
+ boolean firstSpan = true;
+ boolean transactional = false;
List<FieldSchema> partitionKeys = convertToFieldSchemas(table.getPartitionKeys());
while (iterator.hasNext()) {
Partition part = iterator.next();
if (isValidPartition(part, partitionKeys, ifNotExists)) {
MPartition mpart = convertToMPart(part, table, true);
+ if (firstSpan) {
+ if (part.getValidWriteIdList() != null &&
+ TxnUtils.isTransactionalTable(table.getParameters())) {
+ transactional = true;
+ }
+ firstSpan = false;
+ }
+ if (transactional) {
+ // Check for a concurrent INSERT and, if detected, set the basic-stats flag to false.
+ if (!isCurrentStatsValidForTheQuery(mpart, part.getTxnId(), part.getValidWriteIdList())) {
+ StatsSetupConst.setBasicStatsState(mpart.getParameters(), StatsSetupConst.FALSE);
+ }
+ mpart.setTxnId(part.getTxnId());
+ mpart.setWriteIdList(part.getValidWriteIdList());
+ }
+
pm.makePersistent(mpart);
if (tabGrants != null) {
for (MTablePrivilege tab : tabGrants) {
@@ -2442,6 +2407,7 @@ public class ObjectStore implements RawStore, Configurable {
MetaException {
boolean success = false;
boolean commited = false;
+
try {
String catName = part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf);
MTable table = this.getMTable(catName, part.getDbName(), part.getTableName());
@@ -2454,6 +2420,11 @@ public class ObjectStore implements RawStore, Configurable {
}
openTransaction();
MPartition mpart = convertToMPart(part, true);
+ if (part.getValidWriteIdList() != null &&
+ TxnUtils.isTransactionalTable(table.getParameters())) {
+ mpart.setTxnId(part.getTxnId());
+ mpart.setWriteIdList(part.getValidWriteIdList());
+ }
pm.makePersistent(mpart);
int now = (int)(System.currentTimeMillis()/1000);
@@ -2495,14 +2466,37 @@ public class ObjectStore implements RawStore, Configurable {
@Override
public Partition getPartition(String catName, String dbName, String tableName,
List<String> part_vals) throws NoSuchObjectException, MetaException {
+ return getPartition(catName, dbName, tableName, part_vals, -1, null);
+ }
+
+ @Override
+ public Partition getPartition(String catName, String dbName, String tableName,
+ List<String> part_vals,
+ long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException {
openTransaction();
- Partition part = convertToPart(getMPartition(catName, dbName, tableName, part_vals));
+ MTable table = this.getMTable(catName, dbName, tableName);
+ MPartition mpart = getMPartition(catName, dbName, tableName, part_vals);
+ Partition part = convertToPart(mpart);
commitTransaction();
if(part == null) {
throw new NoSuchObjectException("partition values="
+ part_vals.toString());
}
part.setValues(part_vals);
+ // For a transactional table's partition, check whether the current partition
+ // statistics in the metastore comply with the client query's snapshot isolation.
+ if (writeIdList != null) {
+ if (TxnUtils.isTransactionalTable(table.getParameters())) {
+ if (isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) {
+ part.setIsStatsCompliant(IsolationLevelCompliance.YES);
+ } else {
+ part.setIsStatsCompliant(IsolationLevelCompliance.NO);
+ // Do not persist the following state since it is query-specific (not global).
+ StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
+ }
+ }
+ }
return part;
}
@@ -3031,7 +3025,7 @@ public class ObjectStore implements RawStore, Configurable {
TableName.getQualified(catName, dbName, tableName), filter, cols);
List<String> partitionNames = null;
List<Partition> partitions = null;
- Table tbl = getTable(catName, dbName, tableName);
+ Table tbl = getTable(catName, dbName, tableName, -1, null);
try {
// Get partitions by name - ascending or descending
partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending,
@@ -3164,7 +3158,8 @@ public class ObjectStore implements RawStore, Configurable {
if (applyDistinct) {
partValuesSelect.append("DISTINCT ");
}
- List<FieldSchema> partitionKeys = getTable(catName, dbName, tableName).getPartitionKeys();
+ List<FieldSchema> partitionKeys =
+ getTable(catName, dbName, tableName, -1, null).getPartitionKeys();
for (FieldSchema key : cols) {
partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", ");
}
@@ -3246,7 +3241,7 @@ public class ObjectStore implements RawStore, Configurable {
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tableName = normalizeIdentifier(tableName);
- Table table = getTable(catName, dbName, tableName);
+ Table table = getTable(catName, dbName, tableName, -1, null);
if (table == null) {
throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tableName)
+ " table not found");
@@ -3622,7 +3617,8 @@ public class ObjectStore implements RawStore, Configurable {
protected T results = null;
public GetHelper(String catalogName, String dbName, String tblName,
- boolean allowSql, boolean allowJdo) throws MetaException {
+ boolean allowSql, boolean allowJdo)
+ throws MetaException {
assert allowSql || allowJdo;
this.allowJdo = allowJdo;
this.catName = (catalogName != null) ? normalizeIdentifier(catalogName) : null;
@@ -4140,6 +4136,18 @@ public class ObjectStore implements RawStore, Configurable {
oldt.setViewExpandedText(newt.getViewExpandedText());
oldt.setRewriteEnabled(newt.isRewriteEnabled());
+ // If transactional, update the MTable with the txnId and writeIdList
+ // of the current stats-updater query.
+ if (newTable.getValidWriteIdList() != null &&
+ TxnUtils.isTransactionalTable(newTable)) {
+ // Check for a concurrent INSERT and, if detected, set the basic-stats flag to false.
+ if (!isCurrentStatsValidForTheQuery(oldt, newTable.getTxnId(), newTable.getValidWriteIdList())) {
+ StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE);
+ }
+ oldt.setTxnId(newTable.getTxnId());
+ oldt.setWriteIdList(newTable.getValidWriteIdList());
+ }
+
// commit the changes
success = commitTransaction();
} finally {
@@ -4192,6 +4200,7 @@ public class ObjectStore implements RawStore, Configurable {
catName = normalizeIdentifier(catName);
name = normalizeIdentifier(name);
dbname = normalizeIdentifier(dbname);
+ MTable table = this.getMTable(catName, dbname, name);
MPartition oldp = getMPartition(catName, dbname, name, part_vals);
MPartition newp = convertToMPart(newPart, false);
MColumnDescriptor oldCD = null;
@@ -4214,6 +4223,17 @@ public class ObjectStore implements RawStore, Configurable {
if (newp.getLastAccessTime() != oldp.getLastAccessTime()) {
oldp.setLastAccessTime(newp.getLastAccessTime());
}
+ // If transactional, update the partition's txnId and writeIdList
+ // for the current updater query.
+ if (newPart.getValidWriteIdList() != null &&
+ TxnUtils.isTransactionalTable(table.getParameters())) {
+ oldp.setTxnId(newPart.getTxnId());
+ oldp.setWriteIdList(newPart.getValidWriteIdList());
+ // Check for a concurrent INSERT and, if detected, set the basic-stats flag to false.
+ if (!isCurrentStatsValidForTheQuery(oldp, newp.getTxnId(), newp.getWriteIdList())) {
+ StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
+ }
+ }
return oldCD;
}
@@ -4245,7 +4265,8 @@ public class ObjectStore implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbname, String name,
- List<List<String>> part_vals, List<Partition> newParts)
+ List<List<String>> part_vals, List<Partition> newParts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
boolean success = false;
Exception e = null;
@@ -4255,6 +4276,10 @@ public class ObjectStore implements RawStore, Configurable {
Set<MColumnDescriptor> oldCds = new HashSet<>();
for (Partition tmpPart: newParts) {
List<String> tmpPartVals = part_val_itr.next();
+ if (txnId > 0) {
+ tmpPart.setTxnId(txnId);
+ tmpPart.setValidWriteIdList(writeIdList);
+ }
MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, tmpPartVals, tmpPart);
if (oldCd != null) {
oldCds.add(oldCd);
@@ -6131,7 +6156,9 @@ public class ObjectStore implements RawStore, Configurable {
} else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {
boolean found = false;
- Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject.getObjectName());
+ Table tabObj =
+ this.getTable(catName, hiveObject.getDbName(),
+ hiveObject.getObjectName(), -1, null);
String partName = null;
if (hiveObject.getPartValues() != null) {
partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues());
@@ -6165,7 +6192,7 @@ public class ObjectStore implements RawStore, Configurable {
} else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject
- .getObjectName());
+ .getObjectName(), -1, null);
String partName = null;
if (hiveObject.getPartValues() != null) {
partName = Warehouse.makePartName(tabObj.getPartitionKeys(),
@@ -7687,7 +7714,7 @@ public class ObjectStore implements RawStore, Configurable {
query
.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4," +
"java.lang.String t5");
- Table tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid.
+ Table tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
if (null == tbl) {
throw new UnknownTableException("Table: " + tblName + " is not found.");
}
@@ -7713,7 +7740,7 @@ public class ObjectStore implements RawStore, Configurable {
Table tbl = null;
try{
openTransaction();
- tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid.
+ tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
if(null == tbl) {
throw new UnknownTableException("Table: "+ tblName + " is not found.");
}
@@ -8442,7 +8469,10 @@ public class ObjectStore implements RawStore, Configurable {
}
}
- private List<MTableColumnStatistics> getMTableColumnStatistics(Table table, List<String> colNames, QueryWrapper queryWrapper)
+ private List<MTableColumnStatistics> getMTableColumnStatistics(
+ Table table,
+ List<String> colNames,
+ QueryWrapper queryWrapper)
throws MetaException {
if (colNames == null || colNames.isEmpty()) {
return Collections.emptyList();
@@ -8517,9 +8547,41 @@ public class ObjectStore implements RawStore, Configurable {
}
@Override
- public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+ public ColumnStatistics getTableColumnStatistics(
+ String catName,
+ String dbName,
+ String tableName,
List<String> colNames) throws MetaException, NoSuchObjectException {
- return getTableColumnStatisticsInternal(catName, dbName, tableName, colNames, true, true);
+ return getTableColumnStatisticsInternal(
+ catName, dbName, tableName, colNames, true, true);
+ }
+
+ @Override
+ public ColumnStatistics getTableColumnStatistics(
+ String catName,
+ String dbName,
+ String tableName,
+ List<String> colNames,
+ long txnId,
+ String writeIdList) throws MetaException, NoSuchObjectException {
+ IsolationLevelCompliance iLL = IsolationLevelCompliance.UNKNOWN;
+ // If the current stats in the metastore do not comply with the query's
+ // snapshot isolation, set the compliance flag to NO.
+ if (writeIdList != null) {
+ MTable table = this.getMTable(catName, dbName, tableName);
+ if (TxnUtils.isTransactionalTable(table.getParameters()) &&
+ !isCurrentStatsValidForTheQuery(table, txnId, writeIdList)) {
+ iLL = IsolationLevelCompliance.NO;
+ } else {
+ iLL = IsolationLevelCompliance.YES;
+ }
+ }
+ ColumnStatistics cS = getTableColumnStatisticsInternal(
+ catName, dbName, tableName, colNames, true, true);
+ if (cS != null) {
+ cS.setIsStatsCompliant(iLL);
+ }
+ return cS;
}
protected ColumnStatistics getTableColumnStatisticsInternal(
@@ -8538,7 +8600,8 @@ public class ObjectStore implements RawStore, Configurable {
QueryWrapper queryWrapper = new QueryWrapper();
try {
- List<MTableColumnStatistics> mStats = getMTableColumnStatistics(getTable(), colNames, queryWrapper);
+ List<MTableColumnStatistics> mStats =
+ getMTableColumnStatistics(getTable(), colNames, queryWrapper);
if (mStats.isEmpty()) {
return null;
}
@@ -8568,6 +8631,31 @@ public class ObjectStore implements RawStore, Configurable {
catName, dbName, tableName, partNames, colNames, true, true);
}
+ @Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tableName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ // If the current stats in the metastore do not comply with the query's
+ // snapshot isolation, return null.
+ if (writeIdList != null) {
+ if (partNames == null || partNames.isEmpty()) {
+ return null;
+ }
+ MTable mtbl = getMTable(catName, dbName, tableName);
+ MPartition mpart = getMPartition(catName, dbName, tableName, Warehouse.getPartValuesFromPartName(partNames.get(0)));
+ if (TxnUtils.isTransactionalTable(mtbl.getParameters()) &&
+ !isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) {
+ // TODO: return a single result structure carrying the isolation flag
+ // instead of a List or null.
+ return null;
+ }
+ }
+ return getPartitionColumnStatisticsInternal(
+ catName, dbName, tableName, partNames, colNames, true, true);
+ }
+
protected List<ColumnStatistics> getPartitionColumnStatisticsInternal(
String catName, String dbName, String tableName, final List<String> partNames, final List<String> colNames,
boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException {
@@ -8616,10 +8704,27 @@ public class ObjectStore implements RawStore, Configurable {
}.run(true);
}
+ @Override
+ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+ final List<String> partNames, final List<String> colNames,
+ long txnId, String writeIdList) throws MetaException, NoSuchObjectException {
+ // If the current stats in the metastore do not comply with the query's
+ // snapshot isolation, return null.
+ if (writeIdList != null) {
+ MTable mtbl = getMTable(catName, dbName, tblName);
+ MPartition mpart = getMPartition(catName, dbName, tblName,
+ Warehouse.getPartValuesFromPartName(partNames.get(0)));
+ if (TxnUtils.isTransactionalTable(mtbl.getParameters()) &&
+ !isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList)) {
+ return null;
+ }
+ }
+ return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+ }
@Override
public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
- final List<String> partNames, final List<String> colNames) throws MetaException, NoSuchObjectException {
+ final List<String> partNames, final List<String> colNames)
+ throws MetaException, NoSuchObjectException {
final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(),
ConfVars.STATS_NDV_DENSITY_FUNCTION);
final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER);
@@ -8651,7 +8756,8 @@ public class ObjectStore implements RawStore, Configurable {
throws MetaException, NoSuchObjectException {
final boolean enableBitVector =
MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR);
- return new GetHelper<List<MetaStoreUtils.ColStatsObjWithSourceInfo>>(catName, dbName, null, true, false) {
+ return new GetHelper<List<MetaStoreUtils.ColStatsObjWithSourceInfo>>(
+ catName, dbName, null, true, false) {
@Override
protected List<MetaStoreUtils.ColStatsObjWithSourceInfo> getSqlResult(
GetHelper<List<MetaStoreUtils.ColStatsObjWithSourceInfo>> ctx) throws MetaException {
@@ -12109,4 +12215,78 @@ public class ObjectStore implements RawStore, Configurable {
return ret;
}
+ /**
+ * Return true if the current statistics in the metastore are valid
+ * for the query with the given "txnId" and "queryValidWriteIdList".
+ *
+ * A statistics entity is valid iff the stats were written by the current
+ * query, or both of the following hold:
+ * ~ the COLUMN_STATS_ACCURATE (CSA) state is true
+ * ~ the stats snapshot is isolation-level (snapshot) compliant with the query
+ * @param tbl MTable of the stats entity
+ * @param txnId transaction id of the query
+ * @param queryValidWriteIdList valid writeId list of the query
+ */
+ private boolean isCurrentStatsValidForTheQuery(
+ MTable tbl, long txnId, String queryValidWriteIdList)
+ throws MetaException {
+ return isCurrentStatsValidForTheQuery(tbl.getTxnId(), tbl.getParameters(), tbl.getWriteIdList(),
+ txnId, queryValidWriteIdList);
+ }
+
+  /**
+   * Return true if the current statistics in the Metastore are valid
+   * for the query with the given "txnId" and "queryValidWriteIdList".
+   *
+   * Note that a statistics entity is valid iff
+   * it was written by the current query, or
+   * both of the following hold:
+   * ~ the COLUMN_STATS_ACCURATE (CSA) state is true
+   * ~ the stats snapshot is isolation-level (snapshot) compliant with the query
+   * @param part MPartition of the stats entity
+   * @param txnId transaction id of the query
+   * @param queryValidWriteIdList valid writeId list of the query
+   */
+ private boolean isCurrentStatsValidForTheQuery(
+ MPartition part, long txnId, String queryValidWriteIdList)
+ throws MetaException {
+ return isCurrentStatsValidForTheQuery(part.getTxnId(), part.getParameters(), part.getWriteIdList(),
+ txnId, queryValidWriteIdList);
+ }
+
+ private boolean isCurrentStatsValidForTheQuery(
+ long statsTxnId, Map<String, String> statsParams, String statsWriteIdList,
+ long queryTxnId, String queryValidWriteIdList)
+ throws MetaException {
+ if (statsTxnId == queryTxnId) {
+ return true;
+ }
+
+ try {
+ if (TxnDbUtil.isOpenOrAbortedTransaction(conf, statsTxnId)) {
+ return false;
+ }
+ } catch (Exception e) {
+      throw new MetaException("Cannot check transaction state: " + e.getMessage());
+ }
+
+    // This COLUMN_STATS_ACCURATE (CSA) check also covers the case where the stats
+    // were written by an aborted transaction.
+ if (!StatsSetupConst.areBasicStatsUptoDate(statsParams)) {
+ return false;
+ }
+
+    // Basic stats are up to date at this point. If the table/partition contains
+    // no files (NUM_FILES == 0), treat the stats as valid: a newly initialized,
+    // empty table has no write-id snapshot yet.
+    if (Long.parseLong(statsParams.get(StatsSetupConst.NUM_FILES)) == 0) {
+ return true;
+ }
+
+ ValidWriteIdList list4Stats = new ValidReaderWriteIdList(statsWriteIdList);
+ ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIdList);
+
+ return TxnIdUtils.checkEquivalentWriteIds(list4Stats, list4TheQuery);
+ }
}
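For readers following the snapshot check above: the writeId-list strings handled by
isCurrentStatsValidForTheQuery() are the serialized form of ValidReaderWriteIdList. Below is a
minimal sketch of the final equivalence test with illustrative values; the string format shown
and the exact package of TxnIdUtils are assumptions based on the classes the method references.

    // Serialized form: <table>:<highWatermark>:<minOpenWriteId>:<openWriteIds>:<abortedWriteIds>
    // Both lists below describe the same snapshot, so the stats are usable by the query.
    ValidWriteIdList list4Stats =
        new ValidReaderWriteIdList("default.tbl:5:" + Long.MAX_VALUE + "::");
    ValidWriteIdList list4TheQuery =
        new ValidReaderWriteIdList("default.tbl:5:" + Long.MAX_VALUE + "::");
    boolean statsUsable = TxnIdUtils.checkEquivalentWriteIds(list4Stats, list4TheQuery); // true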
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index bbbdf21..e1c1ab9 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -19,10 +19,7 @@
package org.apache.hadoop.hive.metastore;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.*;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
@@ -34,59 +31,6 @@ import java.util.Map;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
import org.apache.thrift.TException;
@@ -266,6 +210,20 @@ public interface RawStore extends Configurable {
Table getTable(String catalogName, String dbName, String tableName) throws MetaException;
/**
+ * Get a table object.
+ * @param catalogName catalog the table is in.
+ * @param dbName database the table is in.
+ * @param tableName table name.
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+   * @return table object, or null if no such table exists. (Ideally this interface would
+   *         either consistently return null or consistently throw NoSuchObjectException.)
+ * @throws MetaException something went wrong in the RDBMS
+ */
+ Table getTable(String catalogName, String dbName, String tableName,
+ long txnId, String writeIdList) throws MetaException;
+
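A note on call conventions: as the CachedStore changes further down show, callers without a
transactional context pass a sentinel snapshot to this overload. A sketch (the store variable
here is a hypothetical RawStore instance, not part of the patch):

    // txnId = -1 and writeIdList = null skip the isolation-level check,
    // so this behaves exactly like the original three-argument getTable().
    Table t = store.getTable("hive", "default", "tbl", -1, null);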
+ /**
* Add a partition.
* @param part partition to add
* @return true if the partition was successfully added.
@@ -317,6 +275,22 @@ public interface RawStore extends Configurable {
*/
Partition getPartition(String catName, String dbName, String tableName,
List<String> part_vals) throws MetaException, NoSuchObjectException;
+ /**
+ * Get a partition.
+ * @param catName catalog name.
+ * @param dbName database name.
+ * @param tableName table name.
+ * @param part_vals partition values for this table.
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+ * @return the partition.
+ * @throws MetaException error reading from RDBMS.
+ * @throws NoSuchObjectException no partition matching this specification exists.
+ */
+ Partition getPartition(String catName, String dbName, String tableName,
+ List<String> part_vals,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException;
/**
* Check whether a partition exists.
@@ -525,11 +499,14 @@ public interface RawStore extends Configurable {
* @param new_parts list of new partitions. The order must match the old partitions described in
* part_vals_list. Each of these should be a complete copy of the new
* partition, not just the pieces to update.
+ * @param txnId transaction id of the transaction that called this method.
+ * @param writeIdList valid write id list of the transaction on the current table
* @throws InvalidObjectException One of the indicated partitions does not exist.
* @throws MetaException error accessing the RDBMS.
*/
void alterPartitions(String catName, String db_name, String tbl_name,
- List<List<String>> part_vals_list, List<Partition> new_parts)
+ List<List<String>> part_vals_list, List<Partition> new_parts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException;
/**
@@ -901,6 +878,25 @@ public interface RawStore extends Configurable {
List<String> colName) throws MetaException, NoSuchObjectException;
/**
+   * Returns the relevant column statistics for the given columns of a given table in a given
+   * database, if such statistics exist.
+   * @param catName catalog name.
+   * @param dbName name of the database, defaults to current database
+   * @param tableName name of the table
+   * @param colName names of the columns for which statistics are requested
+   * @param txnId transaction id of the calling transaction
+   * @param writeIdList string format of valid writeId transaction list
+   * @return relevant column statistics for the given columns of the given table
+   * @throws NoSuchObjectException No such table
+   * @throws MetaException error accessing the RDBMS
+   */
+ ColumnStatistics getTableColumnStatistics(
+ String catName, String dbName, String tableName,
+ List<String> colName, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException;
+
+ /**
* Get statistics for a partition for a set of columns.
* @param catName catalog name.
* @param dbName database name.
@@ -916,6 +912,25 @@ public interface RawStore extends Configurable {
throws MetaException, NoSuchObjectException;
/**
+ * Get statistics for a partition for a set of columns.
+ * @param catName catalog name.
+ * @param dbName database name.
+ * @param tblName table name.
+ * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...]
+ * @param colNames list of columns to get stats for
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+ * @return list of statistics objects
+ * @throws MetaException error accessing the RDBMS
+ * @throws NoSuchObjectException no such partition.
+ */
+ List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tblName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException;
+
+ /**
* Deletes column statistics if present associated with a given db, table, partition and col. If
* null is passed instead of a colName, stats when present for all columns associated
* with a given db, table and partition are deleted.
@@ -1159,6 +1174,25 @@ public interface RawStore extends Configurable {
List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
/**
+ * Get aggregated stats for a table or partition(s).
+ * @param catName catalog name.
+ * @param dbName database name.
+ * @param tblName table name.
+ * @param partNames list of partition names. These are the names of the partitions, not
+ * values.
+ * @param colNames list of column names
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+ * @return aggregated stats
+ * @throws MetaException error accessing RDBMS
+ * @throws NoSuchObjectException no such table or partition
+ */
+ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException;
+
+ /**
* Get column stats for all partitions of all tables in the database
* @param catName catalog name
* @param dbName database name
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 7c3588d..97b926b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -49,68 +49,10 @@ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType;
import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
@@ -879,11 +821,20 @@ public class CachedStore implements RawStore, Configurable {
@Override
public Table getTable(String catName, String dbName, String tblName) throws MetaException {
+ return getTable(catName, dbName, tblName, -1, null);
+ }
+
+ // TODO: if writeIdList is not null, check isolation level compliance for SVS,
+ // possibly with getTableFromCache() with table snapshot in cache.
+ @Override
+ public Table getTable(String catName, String dbName, String tblName,
+ long txnId, String writeIdList)
+ throws MetaException {
catName = normalizeIdentifier(catName);
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
- return rawStore.getTable(catName, dbName, tblName);
+      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
}
Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
if (tbl == null) {
@@ -892,7 +843,7 @@ public class CachedStore implements RawStore, Configurable {
// let's move this table to the top of tblNamesBeingPrewarmed stack,
// so that it gets loaded to the cache faster and is available for subsequent requests
tblsPendingPrewarm.prioritizeTableForPrewarm(tblName);
- return rawStore.getTable(catName, dbName, tblName);
+ return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
}
if (tbl != null) {
tbl.unsetPrivileges();
@@ -955,16 +906,26 @@ public class CachedStore implements RawStore, Configurable {
@Override
public Partition getPartition(String catName, String dbName, String tblName, List<String> part_vals)
throws MetaException, NoSuchObjectException {
+ return getPartition(catName, dbName, tblName, part_vals, -1, null);
+ }
+
+ // TODO: the same as getTable()
+ @Override
+ public Partition getPartition(String catName, String dbName, String tblName,
+ List<String> part_vals, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
catName = normalizeIdentifier(catName);
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
- return rawStore.getPartition(catName, dbName, tblName, part_vals);
+ return rawStore.getPartition(
+ catName, dbName, tblName, part_vals, txnId, writeIdList);
}
Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
if (part == null) {
// The table containing the partition is not yet loaded in cache
- return rawStore.getPartition(catName, dbName, tblName, part_vals);
+ return rawStore.getPartition(
+ catName, dbName, tblName, part_vals, txnId, writeIdList);
}
return part;
}
@@ -1204,15 +1165,17 @@ public class CachedStore implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbName, String tblName,
- List<List<String>> partValsList, List<Partition> newParts)
+ List<List<String>> partValsList, List<Partition> newParts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
- rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+ rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList);
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tblName = normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
return;
}
+    // TODO: propagate the txnId/writeIdList snapshot into alterPartitionsInCache as well.
sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts);
}
@@ -1656,16 +1619,27 @@ public class CachedStore implements RawStore, Configurable {
@Override
public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName,
List<String> colNames) throws MetaException, NoSuchObjectException {
+ return getTableColumnStatistics(catName, dbName, tblName, colNames, -1, null);
+ }
+
+ // TODO: the same as getTable()
+ @Override
+ public ColumnStatistics getTableColumnStatistics(
+ String catName, String dbName, String tblName, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
catName = StringUtils.normalizeIdentifier(catName);
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
- return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+ return rawStore.getTableColumnStatistics(
+ catName, dbName, tblName, colNames, txnId, writeIdList);
}
Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
if (table == null) {
// The table is not yet loaded in cache
- return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+ return rawStore.getTableColumnStatistics(
+ catName, dbName, tblName, colNames, txnId, writeIdList);
}
ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName);
List<ColumnStatisticsObj> colStatObjs =
@@ -1723,6 +1697,15 @@ public class CachedStore implements RawStore, Configurable {
}
@Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tblName, List<String> partNames,
+ List<String> colNames, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return rawStore.getPartitionColumnStatistics(
+ catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName,
List<String> partVals, String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -1743,17 +1726,28 @@ public class CachedStore implements RawStore, Configurable {
@Override
public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
List<String> colNames) throws MetaException, NoSuchObjectException {
+ return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, -1, null);
+ }
+
+  // TODO: the same as getTable()
+  @Override
+ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
List<ColumnStatisticsObj> colStats;
catName = normalizeIdentifier(catName);
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
- rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+      return rawStore.get_aggr_stats_for(
+          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
}
Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
if (table == null) {
// The table is not yet loaded in cache
- return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+ return rawStore.get_aggr_stats_for(
+ catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
}
List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
if (partNames.size() == allPartNames.size()) {
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index ab03adb..a3bed58 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -293,7 +293,7 @@ public class MetastoreConf {
"hive.metastore.cached.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
"Name of the wrapped RawStore class"),
CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY("metastore.cached.rawstore.cache.update.frequency",
- "hive.metastore.cached.rawstore.cache.update.frequency", 60, TimeUnit.SECONDS,
+ "hive.metastore.cached.rawstore.cache.update.frequency", 60000, TimeUnit.SECONDS,
"The time after which metastore cache is updated from metastore DB."),
CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST("metastore.cached.rawstore.cached.object.whitelist",
"hive.metastore.cached.rawstore.cached.object.whitelist", ".*", "Comma separated list of regular expressions \n " +
@@ -648,7 +648,7 @@ public class MetastoreConf {
PWD("javax.jdo.option.ConnectionPassword", "javax.jdo.option.ConnectionPassword", "mine",
"password to use against metastore database"),
RAW_STORE_IMPL("metastore.rawstore.impl", "hive.metastore.rawstore.impl",
- "org.apache.hadoop.hive.metastore.ObjectStore",
+ "org.apache.hadoop.hive.metastore.cache.CachedStore",
"Name of the class that implements org.apache.riven.rawstore interface. \n" +
"This class is used to store and retrieval of raw metadata objects such as table, database"),
REPLCMDIR("metastore.repl.cmrootdir", "hive.repl.cmrootdir", "/user/hive/cmroot/",
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
index 4a97f89..56f9048 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
@@ -30,7 +30,8 @@ public class MPartition {
private int lastAccessTime;
private MStorageDescriptor sd;
private Map<String, String> parameters;
-
+ private long txnId;
+ private String writeIdList;
public MPartition() {}
@@ -152,4 +153,19 @@ public class MPartition {
this.createTime = createTime;
}
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
+
+ public String getWriteIdList() {
+ return writeIdList;
+ }
+
+ public void setWriteIdList(String writeIdList) {
+ this.writeIdList = writeIdList;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
index 50d9c5b..ff68eba 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
@@ -55,6 +55,7 @@ public class MPartitionColumnStatistics {
private Long numTrues;
private Long numFalses;
private long lastAnalyzed;
+ private long txnId;
public MPartitionColumnStatistics() {}
@@ -278,4 +279,12 @@ public class MPartitionColumnStatistics {
public void setBitVector(byte[] bitVector) {
this.bitVector = bitVector;
}
+
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
index 38ad479..7ef1ef6 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
@@ -37,6 +38,8 @@ public class MTable {
private String viewExpandedText;
private boolean rewriteEnabled;
private String tableType;
+ private long txnId;
+ private String writeIdList;
public MTable() {}
@@ -270,4 +273,20 @@ public class MTable {
public String getTableType() {
return tableType;
}
+
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
+
+ public String getWriteIdList() {
+ return writeIdList;
+ }
+
+ public void setWriteIdList(String writeIdList) {
+ this.writeIdList = writeIdList;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
index 731cd6f..9d687e4 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
@@ -53,6 +53,7 @@ public class MTableColumnStatistics {
private Long numTrues;
private Long numFalses;
private long lastAnalyzed;
+ private long txnId;
public MTableColumnStatistics() {}
@@ -269,4 +270,12 @@ public class MTableColumnStatistics {
public void setBitVector(byte[] bitVector) {
this.bitVector = bitVector;
}
+
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 4e3068d..5072bbd 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hive.metastore.txn;
+import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.classification.RetrySemantics;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -576,8 +577,8 @@ class CompactionTxnHandler extends TxnHandler {
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
stmt = dbConn.createStatement();
String s = "select txn_id from TXNS where " +
- "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
- "txn_state = '" + TXN_ABORTED + "'";
+ "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
+ "txn_state = '" + TXN_ABORTED + "'";
LOG.debug("Going to execute query <" + s + ">");
rs = stmt.executeQuery(s);
List<Long> txnids = new ArrayList<>();
@@ -587,10 +588,69 @@ class CompactionTxnHandler extends TxnHandler {
return;
}
Collections.sort(txnids);//easier to read logs
+
List<String> queries = new ArrayList<>();
StringBuilder prefix = new StringBuilder();
StringBuilder suffix = new StringBuilder();
+      // Drop COLUMN_STATS_ACCURATE for the aborted txns' components in TBLS and PARTITIONS
+      for (Long txnId : txnids) {
+        // Get table ids written by the current txnId.
+        s = "select tbl_id from TBLS where txn_id = " + txnId;
+        LOG.debug("Going to execute query <" + s + ">");
+        rs = stmt.executeQuery(s);
+        List<Long> tblIds = new ArrayList<>();
+        while (rs.next()) {
+          tblIds.add(rs.getLong(1));
+        }
+        close(rs);
+
+        if (!tblIds.isEmpty()) {
+          // Delete the COLUMN_STATS_ACCURATE parameter for each tblId; a missing
+          // parameter is treated as "stats not accurate".
+          prefix.append("delete from TABLE_PARAMS " +
+              " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and ");
+          suffix.append("");
+          TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, tblIds, "tbl_id", true, false);
+
+          for (String query : queries) {
+            LOG.debug("Going to execute update <" + query + ">");
+            int rc = stmt.executeUpdate(query);
+            LOG.info("Turned off " + rc + " COLUMN_STATS_ACCURATE.BASIC_STATS states from TBLS");
+          }
+
+          queries.clear();
+          prefix.setLength(0);
+          suffix.setLength(0);
+        }
+
+        // Get partition ids written by the current txnId. A transaction may have
+        // written partitions without a matching TBLS entry, so this step runs
+        // even when tblIds is empty.
+        s = "select part_id from PARTITIONS where txn_id = " + txnId;
+        LOG.debug("Going to execute query <" + s + ">");
+        rs = stmt.executeQuery(s);
+        List<Long> ptnIds = new ArrayList<>();
+        while (rs.next()) {
+          ptnIds.add(rs.getLong(1));
+        }
+        close(rs);
+        if (ptnIds.isEmpty()) {
+          continue;
+        }
+
+        // Delete the COLUMN_STATS_ACCURATE parameter for each ptnId.
+        prefix.append("delete from PARTITION_PARAMS " +
+            " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and ");
+        suffix.append("");
+        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, ptnIds, "part_id", true, false);
+
+        for (String query : queries) {
+          LOG.debug("Going to execute update <" + query + ">");
+          int rc = stmt.executeUpdate(query);
+          LOG.info("Turned off " + rc + " COLUMN_STATS_ACCURATE.BASIC_STATS states from PARTITIONS");
+        }
+
+        queries.clear();
+        prefix.setLength(0);
+        suffix.setLength(0);
+      }
+
+ // Delete from TXNS.
prefix.append("delete from TXNS where ");
suffix.append("");
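For reference, a sketch of what TxnUtils.buildQueryWithINClause() produces for the deletes above
(batching depends on configuration; the ids are illustrative, not taken from the patch):

    List<String> queries = new ArrayList<>();
    StringBuilder prefix = new StringBuilder(
        "delete from TABLE_PARAMS  where param_key = 'COLUMN_STATS_ACCURATE' and ");
    TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(),
        Arrays.asList(7L, 12L, 31L), "tbl_id", true, false);
    // With default batch sizes, queries now holds a single statement similar to:
    //   delete from TABLE_PARAMS  where param_key = 'COLUMN_STATS_ACCURATE'
    //     and (tbl_id in (7,12,31))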
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 50bfca3..bfbd928 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -28,9 +28,12 @@ import java.sql.Statement;
import java.util.Properties;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -195,6 +198,68 @@ public final class TxnDbUtil {
);
try {
+ stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " +
+ " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " +
+ " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " +
+ " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " +
+ " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " +
+ " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', \"TXN_ID\" BIGINT DEFAULT 0, " +
+ " \"WRITEID_LIST\" CLOB, " +
+ " PRIMARY KEY (TBL_ID))"
+ );
+ } catch (SQLException e) {
+ if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+        LOG.info("TBLS table already exists, ignoring");
+ } else {
+ throw e;
+ }
+ }
+
+ try {
+ stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" +
+ " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " +
+ " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " +
+ " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, \"TXN_ID\" BIGINT DEFAULT 0, " +
+ " \"WRITEID_LIST\" CLOB, " +
+ " PRIMARY KEY (PART_ID))"
+ );
+ } catch (SQLException e) {
+ if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+        LOG.info("PARTITIONS table already exists, ignoring");
+ } else {
+ throw e;
+ }
+ }
+
+ try {
+ stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" +
+ " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
+ " \"PARAM_VALUE\" CLOB, " +
+ " PRIMARY KEY (TBL_ID, PARAM_KEY))"
+ );
+ } catch (SQLException e) {
+ if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+        LOG.info("TABLE_PARAMS table already exists, ignoring");
+ } else {
+ throw e;
+ }
+ }
+
+ try {
+ stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" +
+ " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
+ " \"PARAM_VALUE\" VARCHAR(4000), " +
+ " PRIMARY KEY (PART_ID, PARAM_KEY))"
+ );
+ } catch (SQLException e) {
+ if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+        LOG.info("PARTITION_PARAMS table already exists, ignoring");
+ } else {
+ throw e;
+ }
+ }
+
+ try {
stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " +
"NULL, \"NEXT_VAL\" BIGINT NOT NULL)"
@@ -376,6 +441,35 @@ public final class TxnDbUtil {
}
/**
+   * Return true if the transaction with the given txnId is open or aborted,
+   * i.e. still present in the TXNS table (committed transactions are removed).
+   * @param conf Configuration used to connect to the metastore DB
+   * @param txnId transaction id to search for
+   * @return true if the transaction is open or aborted, false otherwise
+   * @throws Exception if the metastore DB cannot be queried
+   */
+ public static boolean isOpenOrAbortedTransaction(Configuration conf, long txnId) throws Exception {
+ Connection conn = null;
+ PreparedStatement stmt = null;
+ ResultSet rs = null;
+ try {
+ conn = getConnection(conf);
+ conn.setAutoCommit(false);
+ conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+
+ stmt = conn.prepareStatement("SELECT txn_id FROM TXNS WHERE txn_id = ?");
+ stmt.setLong(1, txnId);
+ rs = stmt.executeQuery();
+      // A row is still present in TXNS iff the txn is open or aborted.
+      return rs.next();
+ } finally {
+ closeResources(conn, stmt, rs);
+ }
+ }
+
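A sketch of the caller-side pattern (it mirrors the ObjectStore.isCurrentStatsValidForTheQuery()
change above): stats written by a transaction that is still open, or that was aborted, must not
be trusted.

    if (TxnDbUtil.isOpenOrAbortedTransaction(conf, statsTxnId)) {
      return false; // the stats snapshot is not valid for this query
    }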
+ /**
* Utility method used to run COUNT queries like "select count(*) from ..." against metastore tables
* @param countQuery countQuery text
* @return count countQuery result
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index d53279e..d9878a3 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -19,13 +19,7 @@ package org.apache.hadoop.hive.metastore.txn;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
-import java.sql.Connection;
-import java.sql.Driver;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.SQLFeatureNotSupportedException;
-import java.sql.Savepoint;
-import java.sql.Statement;
+import java.sql.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index fa291d5..aac5811 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -25,11 +25,7 @@ import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.utils.JavaUtils;
@@ -46,6 +42,12 @@ import java.util.Map;
public class TxnUtils {
private static final Logger LOG = LoggerFactory.getLogger(TxnUtils.class);
+ // Transactional stats states
+  public static final char STAT_OPEN = 'o';
+  public static final char STAT_INVALID = 'i';
+  public static final char STAT_COMMITTED = 'c';
+  public static final char STAT_OBSOLETE = 's';
+
/**
* Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a
* {@link org.apache.hadoop.hive.common.ValidTxnList}. This assumes that the caller intends to
@@ -223,6 +225,14 @@ public class TxnUtils {
return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
}
+ public static boolean isTransactionalTable(Map<String, String> parameters) {
+ if (parameters == null) {
+ return false;
+ }
+ String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+ return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
+ }
+
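Usage sketch for the new Map-based overload (the parameter map here is made up for illustration;
ObjectStore above invokes it as TxnUtils.isTransactionalTable(mtbl.getParameters())):

    Map<String, String> params = new HashMap<>();
    params.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
    boolean txnal = TxnUtils.isTransactionalTable(params); // true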
/**
* Should produce the same result as
* {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/package.jdo b/standalone-metastore/src/main/resources/package.jdo
index 1be3e98..3997f53 100644
--- a/standalone-metastore/src/main/resources/package.jdo
+++ b/standalone-metastore/src/main/resources/package.jdo
@@ -210,6 +210,12 @@
<field name="tableType">
<column name="TBL_TYPE" length="128" jdbc-type="VARCHAR"/>
</field>
+ <field name="txnId">
+ <column name="TXN_ID"/>
+ </field>
+ <field name="writeIdList">
+ <column name="WRITEID_LIST" jdbc-type="CLOB" allows-null="true"/>
+ </field>
</class>
<class name="MCreationMetadata" identity-type="datastore" table="MV_CREATION_METADATA" detachable="true">
@@ -489,6 +495,12 @@
<column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
</value>
</field>
+ <field name="txnId">
+ <column name="TXN_ID"/>
+ </field>
+ <field name="writeIdList">
+ <column name="WRITEID_LIST" jdbc-type="CLOB" allows-null="true"/>
+ </field>
</class>
<class name="MIndex" table="IDXS" identity-type="datastore" detachable="true">
@@ -989,6 +1001,9 @@
<field name="lastAnalyzed">
<column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
</field>
+ <field name="txnId">
+ <column name="TXN_ID"/>
+ </field>
</class>
<class name="MPartitionColumnStatistics" table="PART_COL_STATS" identity-type="datastore" detachable="true">
@@ -1059,6 +1074,9 @@
<field name="lastAnalyzed">
<column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
</field>
+ <field name="txnId">
+ <column name="TXN_ID"/>
+ </field>
</class>
<class name="MVersionTable" table="VERSION" identity-type="datastore" detachable="true">
<datastore-identity>
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
index e818e1b..280fd4a 100644
--- a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
@@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT
CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
@@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "
CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
@@ -106,7 +106,8 @@ CREATE TABLE "APP"."TAB_COL_STATS"(
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
"TBL_ID" BIGINT NOT NULL,
- "BIT_VECTOR" BLOB
+ "BIT_VECTOR" BLOB,
+ "TXN_ID" BIGINT DEFAULT 0
);
CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
@@ -155,7 +156,8 @@ CREATE TABLE "APP"."PART_COL_STATS"(
"NUM_FALSES" BIGINT,
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
- "PART_ID" BIGINT NOT NULL
+ "PART_ID" BIGINT NOT NULL,
+ "TXN_ID" BIGINT DEFAULT 0
);
CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
@@ -373,7 +375,6 @@ ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK
ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID");
-
-- foreign
ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
[08/13] hive git commit: HIVE-19532: 03 patch
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index bc4d168..359c417 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -215,6 +215,18 @@ const char* _kSchemaVersionStateNames[] = {
};
const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kSchemaVersionStateValues, _kSchemaVersionStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+int _kIsolationLevelComplianceValues[] = {
+ IsolationLevelCompliance::YES,
+ IsolationLevelCompliance::NO,
+ IsolationLevelCompliance::UNKNOWN
+};
+const char* _kIsolationLevelComplianceNames[] = {
+ "YES",
+ "NO",
+ "UNKNOWN"
+};
+const std::map<int, const char*> _IsolationLevelCompliance_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kIsolationLevelComplianceValues, _kIsolationLevelComplianceNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
int _kFunctionTypeValues[] = {
FunctionType::JAVA
};
@@ -6435,6 +6447,21 @@ void Table::__set_ownerType(const PrincipalType::type val) {
__isset.ownerType = true;
}
+void Table::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void Table::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void Table::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -6629,6 +6656,32 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 19:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 20:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 21:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast249;
+ xfer += iprot->readI32(ecast249);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast249;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -6677,10 +6730,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionKeys.size()));
- std::vector<FieldSchema> ::const_iterator _iter249;
- for (_iter249 = this->partitionKeys.begin(); _iter249 != this->partitionKeys.end(); ++_iter249)
+ std::vector<FieldSchema> ::const_iterator _iter250;
+ for (_iter250 = this->partitionKeys.begin(); _iter250 != this->partitionKeys.end(); ++_iter250)
{
- xfer += (*_iter249).write(oprot);
+ xfer += (*_iter250).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -6689,11 +6742,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter250;
- for (_iter250 = this->parameters.begin(); _iter250 != this->parameters.end(); ++_iter250)
+ std::map<std::string, std::string> ::const_iterator _iter251;
+ for (_iter251 = this->parameters.begin(); _iter251 != this->parameters.end(); ++_iter251)
{
- xfer += oprot->writeString(_iter250->first);
- xfer += oprot->writeString(_iter250->second);
+ xfer += oprot->writeString(_iter251->first);
+ xfer += oprot->writeString(_iter251->second);
}
xfer += oprot->writeMapEnd();
}
@@ -6741,6 +6794,21 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeI32((int32_t)this->ownerType);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 19);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 20);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 21);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -6766,31 +6834,13 @@ void swap(Table &a, Table &b) {
swap(a.creationMetadata, b.creationMetadata);
swap(a.catName, b.catName);
swap(a.ownerType, b.ownerType);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
swap(a.__isset, b.__isset);
}
-Table::Table(const Table& other251) {
- tableName = other251.tableName;
- dbName = other251.dbName;
- owner = other251.owner;
- createTime = other251.createTime;
- lastAccessTime = other251.lastAccessTime;
- retention = other251.retention;
- sd = other251.sd;
- partitionKeys = other251.partitionKeys;
- parameters = other251.parameters;
- viewOriginalText = other251.viewOriginalText;
- viewExpandedText = other251.viewExpandedText;
- tableType = other251.tableType;
- privileges = other251.privileges;
- temporary = other251.temporary;
- rewriteEnabled = other251.rewriteEnabled;
- creationMetadata = other251.creationMetadata;
- catName = other251.catName;
- ownerType = other251.ownerType;
- __isset = other251.__isset;
-}
-Table& Table::operator=(const Table& other252) {
+Table::Table(const Table& other252) {
tableName = other252.tableName;
dbName = other252.dbName;
owner = other252.owner;
@@ -6809,7 +6859,34 @@ Table& Table::operator=(const Table& other252) {
creationMetadata = other252.creationMetadata;
catName = other252.catName;
ownerType = other252.ownerType;
+ txnId = other252.txnId;
+ validWriteIdList = other252.validWriteIdList;
+ isStatsCompliant = other252.isStatsCompliant;
__isset = other252.__isset;
+}
+Table& Table::operator=(const Table& other253) {
+ tableName = other253.tableName;
+ dbName = other253.dbName;
+ owner = other253.owner;
+ createTime = other253.createTime;
+ lastAccessTime = other253.lastAccessTime;
+ retention = other253.retention;
+ sd = other253.sd;
+ partitionKeys = other253.partitionKeys;
+ parameters = other253.parameters;
+ viewOriginalText = other253.viewOriginalText;
+ viewExpandedText = other253.viewExpandedText;
+ tableType = other253.tableType;
+ privileges = other253.privileges;
+ temporary = other253.temporary;
+ rewriteEnabled = other253.rewriteEnabled;
+ creationMetadata = other253.creationMetadata;
+ catName = other253.catName;
+ ownerType = other253.ownerType;
+ txnId = other253.txnId;
+ validWriteIdList = other253.validWriteIdList;
+ isStatsCompliant = other253.isStatsCompliant;
+ __isset = other253.__isset;
return *this;
}
void Table::printTo(std::ostream& out) const {
@@ -6833,6 +6910,9 @@ void Table::printTo(std::ostream& out) const {
out << ", " << "creationMetadata="; (__isset.creationMetadata ? (out << to_string(creationMetadata)) : (out << "<null>"));
out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
out << ", " << "ownerType="; (__isset.ownerType ? (out << to_string(ownerType)) : (out << "<null>"));
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
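
The Table hunks above follow the stock Thrift C++ optional-field pattern: each new field (txnId, validWriteIdList, isStatsCompliant) gets a presence bit in the struct's __isset member, a __set_* helper that assigns the value and flips the bit, and guards in write() and printTo() that only emit the field when the bit is set. A minimal standalone sketch of that pattern, using a stand-in struct rather than the generated Table (the double-underscore names mirror the generated code, though they are reserved identifiers in portable C++):

#include <cstdint>
#include <iostream>
#include <string>

// Stand-in for the generated pattern: one presence bit per optional field,
// __set_* flips it, printTo() prints "<null>" for unset fields.
struct MiniTable {
  struct Isset {
    bool txnId = false;
    bool validWriteIdList = false;
  } __isset;
  int64_t txnId = 0;
  std::string validWriteIdList;

  void __set_txnId(const int64_t val) { txnId = val; __isset.txnId = true; }
  void __set_validWriteIdList(const std::string& val) {
    validWriteIdList = val;
    __isset.validWriteIdList = true;
  }

  void printTo(std::ostream& out) const {
    out << "MiniTable(";
    out << "txnId="; (__isset.txnId ? (out << txnId) : (out << "<null>"));
    out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << validWriteIdList) : (out << "<null>"));
    out << ")";
  }
};

int main() {
  MiniTable t;
  t.__set_txnId(42);   // presence bit now set; validWriteIdList stays <null>
  t.printTo(std::cout);
  std::cout << std::endl;
  return 0;
}
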
@@ -6879,6 +6959,21 @@ void Partition::__set_catName(const std::string& val) {
__isset.catName = true;
}
+void Partition::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void Partition::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void Partition::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -6904,14 +6999,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->values.clear();
- uint32_t _size253;
- ::apache::thrift::protocol::TType _etype256;
- xfer += iprot->readListBegin(_etype256, _size253);
- this->values.resize(_size253);
- uint32_t _i257;
- for (_i257 = 0; _i257 < _size253; ++_i257)
+ uint32_t _size254;
+ ::apache::thrift::protocol::TType _etype257;
+ xfer += iprot->readListBegin(_etype257, _size254);
+ this->values.resize(_size254);
+ uint32_t _i258;
+ for (_i258 = 0; _i258 < _size254; ++_i258)
{
- xfer += iprot->readString(this->values[_i257]);
+ xfer += iprot->readString(this->values[_i258]);
}
xfer += iprot->readListEnd();
}
@@ -6964,17 +7059,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->parameters.clear();
- uint32_t _size258;
- ::apache::thrift::protocol::TType _ktype259;
- ::apache::thrift::protocol::TType _vtype260;
- xfer += iprot->readMapBegin(_ktype259, _vtype260, _size258);
- uint32_t _i262;
- for (_i262 = 0; _i262 < _size258; ++_i262)
+ uint32_t _size259;
+ ::apache::thrift::protocol::TType _ktype260;
+ ::apache::thrift::protocol::TType _vtype261;
+ xfer += iprot->readMapBegin(_ktype260, _vtype261, _size259);
+ uint32_t _i263;
+ for (_i263 = 0; _i263 < _size259; ++_i263)
{
- std::string _key263;
- xfer += iprot->readString(_key263);
- std::string& _val264 = this->parameters[_key263];
- xfer += iprot->readString(_val264);
+ std::string _key264;
+ xfer += iprot->readString(_key264);
+ std::string& _val265 = this->parameters[_key264];
+ xfer += iprot->readString(_val265);
}
xfer += iprot->readMapEnd();
}
@@ -6999,6 +7094,32 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 10:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 11:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 12:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast266;
+ xfer += iprot->readI32(ecast266);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast266;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -7019,10 +7140,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
- std::vector<std::string> ::const_iterator _iter265;
- for (_iter265 = this->values.begin(); _iter265 != this->values.end(); ++_iter265)
+ std::vector<std::string> ::const_iterator _iter267;
+ for (_iter267 = this->values.begin(); _iter267 != this->values.end(); ++_iter267)
{
- xfer += oprot->writeString((*_iter265));
+ xfer += oprot->writeString((*_iter267));
}
xfer += oprot->writeListEnd();
}
@@ -7051,11 +7172,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter266;
- for (_iter266 = this->parameters.begin(); _iter266 != this->parameters.end(); ++_iter266)
+ std::map<std::string, std::string> ::const_iterator _iter268;
+ for (_iter268 = this->parameters.begin(); _iter268 != this->parameters.end(); ++_iter268)
{
- xfer += oprot->writeString(_iter266->first);
- xfer += oprot->writeString(_iter266->second);
+ xfer += oprot->writeString(_iter268->first);
+ xfer += oprot->writeString(_iter268->second);
}
xfer += oprot->writeMapEnd();
}
@@ -7071,6 +7192,21 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeString(this->catName);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 10);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 11);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 12);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -7087,32 +7223,41 @@ void swap(Partition &a, Partition &b) {
swap(a.parameters, b.parameters);
swap(a.privileges, b.privileges);
swap(a.catName, b.catName);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
swap(a.__isset, b.__isset);
}
-Partition::Partition(const Partition& other267) {
- values = other267.values;
- dbName = other267.dbName;
- tableName = other267.tableName;
- createTime = other267.createTime;
- lastAccessTime = other267.lastAccessTime;
- sd = other267.sd;
- parameters = other267.parameters;
- privileges = other267.privileges;
- catName = other267.catName;
- __isset = other267.__isset;
-}
-Partition& Partition::operator=(const Partition& other268) {
- values = other268.values;
- dbName = other268.dbName;
- tableName = other268.tableName;
- createTime = other268.createTime;
- lastAccessTime = other268.lastAccessTime;
- sd = other268.sd;
- parameters = other268.parameters;
- privileges = other268.privileges;
- catName = other268.catName;
- __isset = other268.__isset;
+Partition::Partition(const Partition& other269) {
+ values = other269.values;
+ dbName = other269.dbName;
+ tableName = other269.tableName;
+ createTime = other269.createTime;
+ lastAccessTime = other269.lastAccessTime;
+ sd = other269.sd;
+ parameters = other269.parameters;
+ privileges = other269.privileges;
+ catName = other269.catName;
+ txnId = other269.txnId;
+ validWriteIdList = other269.validWriteIdList;
+ isStatsCompliant = other269.isStatsCompliant;
+ __isset = other269.__isset;
+}
+Partition& Partition::operator=(const Partition& other270) {
+ values = other270.values;
+ dbName = other270.dbName;
+ tableName = other270.tableName;
+ createTime = other270.createTime;
+ lastAccessTime = other270.lastAccessTime;
+ sd = other270.sd;
+ parameters = other270.parameters;
+ privileges = other270.privileges;
+ catName = other270.catName;
+ txnId = other270.txnId;
+ validWriteIdList = other270.validWriteIdList;
+ isStatsCompliant = other270.isStatsCompliant;
+ __isset = other270.__isset;
return *this;
}
void Partition::printTo(std::ostream& out) const {
@@ -7127,6 +7272,9 @@ void Partition::printTo(std::ostream& out) const {
out << ", " << "parameters=" << to_string(parameters);
out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "<null>"));
out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
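
On the read side, the new case 10/11/12 arms in Partition::read() only decode a field when the wire type matches (T_I64, T_STRING, T_I32 respectively) and otherwise skip it, while the default arm skips any unknown field id. That skip-on-mismatch discipline is what lets readers built before this patch consume new payloads, and vice versa. An illustrative model of that dispatch, with stand-in types rather than Thrift's protocol API:

#include <cstdint>
#include <cstdio>

// Illustrative model (not Thrift's API) of the read() dispatch above: known
// field ids are decoded only when the wire type matches; everything else is
// skipped, which keeps old readers safe when fields 10-12 appear.
enum class TType { T_I64, T_STRING, T_I32 };

struct WireField { int16_t id; TType type; };

void readPartitionLike(const WireField* fields, int n) {
  for (int i = 0; i < n; ++i) {
    switch (fields[i].id) {
      case 10:
        if (fields[i].type == TType::T_I64)
          std::printf("decode txnId\n");
        else
          std::printf("skip field 10 (unexpected type)\n");
        break;
      case 11:
        if (fields[i].type == TType::T_STRING)
          std::printf("decode validWriteIdList\n");
        else
          std::printf("skip field 11 (unexpected type)\n");
        break;
      default:
        std::printf("skip unknown field %d\n", fields[i].id);  // forward compatibility
        break;
    }
  }
}

int main() {
  const WireField wire[] = {{10, TType::T_I64}, {11, TType::T_STRING}, {99, TType::T_I32}};
  readPartitionLike(wire, 3);
  return 0;
}
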
@@ -7185,14 +7333,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->values.clear();
- uint32_t _size269;
- ::apache::thrift::protocol::TType _etype272;
- xfer += iprot->readListBegin(_etype272, _size269);
- this->values.resize(_size269);
- uint32_t _i273;
- for (_i273 = 0; _i273 < _size269; ++_i273)
+ uint32_t _size271;
+ ::apache::thrift::protocol::TType _etype274;
+ xfer += iprot->readListBegin(_etype274, _size271);
+ this->values.resize(_size271);
+ uint32_t _i275;
+ for (_i275 = 0; _i275 < _size271; ++_i275)
{
- xfer += iprot->readString(this->values[_i273]);
+ xfer += iprot->readString(this->values[_i275]);
}
xfer += iprot->readListEnd();
}
@@ -7229,17 +7377,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->parameters.clear();
- uint32_t _size274;
- ::apache::thrift::protocol::TType _ktype275;
- ::apache::thrift::protocol::TType _vtype276;
- xfer += iprot->readMapBegin(_ktype275, _vtype276, _size274);
- uint32_t _i278;
- for (_i278 = 0; _i278 < _size274; ++_i278)
+ uint32_t _size276;
+ ::apache::thrift::protocol::TType _ktype277;
+ ::apache::thrift::protocol::TType _vtype278;
+ xfer += iprot->readMapBegin(_ktype277, _vtype278, _size276);
+ uint32_t _i280;
+ for (_i280 = 0; _i280 < _size276; ++_i280)
{
- std::string _key279;
- xfer += iprot->readString(_key279);
- std::string& _val280 = this->parameters[_key279];
- xfer += iprot->readString(_val280);
+ std::string _key281;
+ xfer += iprot->readString(_key281);
+ std::string& _val282 = this->parameters[_key281];
+ xfer += iprot->readString(_val282);
}
xfer += iprot->readMapEnd();
}
@@ -7276,10 +7424,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
- std::vector<std::string> ::const_iterator _iter281;
- for (_iter281 = this->values.begin(); _iter281 != this->values.end(); ++_iter281)
+ std::vector<std::string> ::const_iterator _iter283;
+ for (_iter283 = this->values.begin(); _iter283 != this->values.end(); ++_iter283)
{
- xfer += oprot->writeString((*_iter281));
+ xfer += oprot->writeString((*_iter283));
}
xfer += oprot->writeListEnd();
}
@@ -7300,11 +7448,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter282;
- for (_iter282 = this->parameters.begin(); _iter282 != this->parameters.end(); ++_iter282)
+ std::map<std::string, std::string> ::const_iterator _iter284;
+ for (_iter284 = this->parameters.begin(); _iter284 != this->parameters.end(); ++_iter284)
{
- xfer += oprot->writeString(_iter282->first);
- xfer += oprot->writeString(_iter282->second);
+ xfer += oprot->writeString(_iter284->first);
+ xfer += oprot->writeString(_iter284->second);
}
xfer += oprot->writeMapEnd();
}
@@ -7331,23 +7479,23 @@ void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) {
swap(a.__isset, b.__isset);
}
-PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other283) {
- values = other283.values;
- createTime = other283.createTime;
- lastAccessTime = other283.lastAccessTime;
- relativePath = other283.relativePath;
- parameters = other283.parameters;
- privileges = other283.privileges;
- __isset = other283.__isset;
-}
-PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other284) {
- values = other284.values;
- createTime = other284.createTime;
- lastAccessTime = other284.lastAccessTime;
- relativePath = other284.relativePath;
- parameters = other284.parameters;
- privileges = other284.privileges;
- __isset = other284.__isset;
+PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other285) {
+ values = other285.values;
+ createTime = other285.createTime;
+ lastAccessTime = other285.lastAccessTime;
+ relativePath = other285.relativePath;
+ parameters = other285.parameters;
+ privileges = other285.privileges;
+ __isset = other285.__isset;
+}
+PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other286) {
+ values = other286.values;
+ createTime = other286.createTime;
+ lastAccessTime = other286.lastAccessTime;
+ relativePath = other286.relativePath;
+ parameters = other286.parameters;
+ privileges = other286.privileges;
+ __isset = other286.__isset;
return *this;
}
void PartitionWithoutSD::printTo(std::ostream& out) const {
@@ -7400,14 +7548,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size285;
- ::apache::thrift::protocol::TType _etype288;
- xfer += iprot->readListBegin(_etype288, _size285);
- this->partitions.resize(_size285);
- uint32_t _i289;
- for (_i289 = 0; _i289 < _size285; ++_i289)
+ uint32_t _size287;
+ ::apache::thrift::protocol::TType _etype290;
+ xfer += iprot->readListBegin(_etype290, _size287);
+ this->partitions.resize(_size287);
+ uint32_t _i291;
+ for (_i291 = 0; _i291 < _size287; ++_i291)
{
- xfer += this->partitions[_i289].read(iprot);
+ xfer += this->partitions[_i291].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -7444,10 +7592,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<PartitionWithoutSD> ::const_iterator _iter290;
- for (_iter290 = this->partitions.begin(); _iter290 != this->partitions.end(); ++_iter290)
+ std::vector<PartitionWithoutSD> ::const_iterator _iter292;
+ for (_iter292 = this->partitions.begin(); _iter292 != this->partitions.end(); ++_iter292)
{
- xfer += (*_iter290).write(oprot);
+ xfer += (*_iter292).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -7469,15 +7617,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) {
swap(a.__isset, b.__isset);
}
-PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other291) {
- partitions = other291.partitions;
- sd = other291.sd;
- __isset = other291.__isset;
+PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other293) {
+ partitions = other293.partitions;
+ sd = other293.sd;
+ __isset = other293.__isset;
}
-PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other292) {
- partitions = other292.partitions;
- sd = other292.sd;
- __isset = other292.__isset;
+PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other294) {
+ partitions = other294.partitions;
+ sd = other294.sd;
+ __isset = other294.__isset;
return *this;
}
void PartitionSpecWithSharedSD::printTo(std::ostream& out) const {
@@ -7522,14 +7670,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size293;
- ::apache::thrift::protocol::TType _etype296;
- xfer += iprot->readListBegin(_etype296, _size293);
- this->partitions.resize(_size293);
- uint32_t _i297;
- for (_i297 = 0; _i297 < _size293; ++_i297)
+ uint32_t _size295;
+ ::apache::thrift::protocol::TType _etype298;
+ xfer += iprot->readListBegin(_etype298, _size295);
+ this->partitions.resize(_size295);
+ uint32_t _i299;
+ for (_i299 = 0; _i299 < _size295; ++_i299)
{
- xfer += this->partitions[_i297].read(iprot);
+ xfer += this->partitions[_i299].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -7558,10 +7706,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<Partition> ::const_iterator _iter298;
- for (_iter298 = this->partitions.begin(); _iter298 != this->partitions.end(); ++_iter298)
+ std::vector<Partition> ::const_iterator _iter300;
+ for (_iter300 = this->partitions.begin(); _iter300 != this->partitions.end(); ++_iter300)
{
- xfer += (*_iter298).write(oprot);
+ xfer += (*_iter300).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -7578,13 +7726,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) {
swap(a.__isset, b.__isset);
}
-PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other299) {
- partitions = other299.partitions;
- __isset = other299.__isset;
+PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other301) {
+ partitions = other301.partitions;
+ __isset = other301.__isset;
}
-PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other300) {
- partitions = other300.partitions;
- __isset = other300.__isset;
+PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other302) {
+ partitions = other302.partitions;
+ __isset = other302.__isset;
return *this;
}
void PartitionListComposingSpec::printTo(std::ostream& out) const {
@@ -7626,6 +7774,21 @@ void PartitionSpec::__set_catName(const std::string& val) {
__isset.catName = true;
}
+void PartitionSpec::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void PartitionSpec::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void PartitionSpec::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -7695,6 +7858,32 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 7:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 8:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 9:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast303;
+ xfer += iprot->readI32(ecast303);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast303;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -7739,6 +7928,21 @@ uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) cons
xfer += oprot->writeString(this->catName);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 7);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 8);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 9);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -7752,26 +7956,35 @@ void swap(PartitionSpec &a, PartitionSpec &b) {
swap(a.sharedSDPartitionSpec, b.sharedSDPartitionSpec);
swap(a.partitionList, b.partitionList);
swap(a.catName, b.catName);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
swap(a.__isset, b.__isset);
}
-PartitionSpec::PartitionSpec(const PartitionSpec& other301) {
- dbName = other301.dbName;
- tableName = other301.tableName;
- rootPath = other301.rootPath;
- sharedSDPartitionSpec = other301.sharedSDPartitionSpec;
- partitionList = other301.partitionList;
- catName = other301.catName;
- __isset = other301.__isset;
+PartitionSpec::PartitionSpec(const PartitionSpec& other304) {
+ dbName = other304.dbName;
+ tableName = other304.tableName;
+ rootPath = other304.rootPath;
+ sharedSDPartitionSpec = other304.sharedSDPartitionSpec;
+ partitionList = other304.partitionList;
+ catName = other304.catName;
+ txnId = other304.txnId;
+ validWriteIdList = other304.validWriteIdList;
+ isStatsCompliant = other304.isStatsCompliant;
+ __isset = other304.__isset;
}
-PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other302) {
- dbName = other302.dbName;
- tableName = other302.tableName;
- rootPath = other302.rootPath;
- sharedSDPartitionSpec = other302.sharedSDPartitionSpec;
- partitionList = other302.partitionList;
- catName = other302.catName;
- __isset = other302.__isset;
+PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other305) {
+ dbName = other305.dbName;
+ tableName = other305.tableName;
+ rootPath = other305.rootPath;
+ sharedSDPartitionSpec = other305.sharedSDPartitionSpec;
+ partitionList = other305.partitionList;
+ catName = other305.catName;
+ txnId = other305.txnId;
+ validWriteIdList = other305.validWriteIdList;
+ isStatsCompliant = other305.isStatsCompliant;
+ __isset = other305.__isset;
return *this;
}
void PartitionSpec::printTo(std::ostream& out) const {
@@ -7783,6 +7996,9 @@ void PartitionSpec::printTo(std::ostream& out) const {
out << ", " << "sharedSDPartitionSpec="; (__isset.sharedSDPartitionSpec ? (out << to_string(sharedSDPartitionSpec)) : (out << "<null>"));
out << ", " << "partitionList="; (__isset.partitionList ? (out << to_string(partitionList)) : (out << "<null>"));
out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
@@ -7918,19 +8134,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other303) {
- numTrues = other303.numTrues;
- numFalses = other303.numFalses;
- numNulls = other303.numNulls;
- bitVectors = other303.bitVectors;
- __isset = other303.__isset;
+BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other306) {
+ numTrues = other306.numTrues;
+ numFalses = other306.numFalses;
+ numNulls = other306.numNulls;
+ bitVectors = other306.bitVectors;
+ __isset = other306.__isset;
}
-BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other304) {
- numTrues = other304.numTrues;
- numFalses = other304.numFalses;
- numNulls = other304.numNulls;
- bitVectors = other304.bitVectors;
- __isset = other304.__isset;
+BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other307) {
+ numTrues = other307.numTrues;
+ numFalses = other307.numFalses;
+ numNulls = other307.numNulls;
+ bitVectors = other307.bitVectors;
+ __isset = other307.__isset;
return *this;
}
void BooleanColumnStatsData::printTo(std::ostream& out) const {
@@ -8093,21 +8309,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other305) {
- lowValue = other305.lowValue;
- highValue = other305.highValue;
- numNulls = other305.numNulls;
- numDVs = other305.numDVs;
- bitVectors = other305.bitVectors;
- __isset = other305.__isset;
+DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other308) {
+ lowValue = other308.lowValue;
+ highValue = other308.highValue;
+ numNulls = other308.numNulls;
+ numDVs = other308.numDVs;
+ bitVectors = other308.bitVectors;
+ __isset = other308.__isset;
}
-DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other306) {
- lowValue = other306.lowValue;
- highValue = other306.highValue;
- numNulls = other306.numNulls;
- numDVs = other306.numDVs;
- bitVectors = other306.bitVectors;
- __isset = other306.__isset;
+DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other309) {
+ lowValue = other309.lowValue;
+ highValue = other309.highValue;
+ numNulls = other309.numNulls;
+ numDVs = other309.numDVs;
+ bitVectors = other309.bitVectors;
+ __isset = other309.__isset;
return *this;
}
void DoubleColumnStatsData::printTo(std::ostream& out) const {
@@ -8271,21 +8487,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other307) {
- lowValue = other307.lowValue;
- highValue = other307.highValue;
- numNulls = other307.numNulls;
- numDVs = other307.numDVs;
- bitVectors = other307.bitVectors;
- __isset = other307.__isset;
+LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other310) {
+ lowValue = other310.lowValue;
+ highValue = other310.highValue;
+ numNulls = other310.numNulls;
+ numDVs = other310.numDVs;
+ bitVectors = other310.bitVectors;
+ __isset = other310.__isset;
}
-LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other308) {
- lowValue = other308.lowValue;
- highValue = other308.highValue;
- numNulls = other308.numNulls;
- numDVs = other308.numDVs;
- bitVectors = other308.bitVectors;
- __isset = other308.__isset;
+LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other311) {
+ lowValue = other311.lowValue;
+ highValue = other311.highValue;
+ numNulls = other311.numNulls;
+ numDVs = other311.numDVs;
+ bitVectors = other311.bitVectors;
+ __isset = other311.__isset;
return *this;
}
void LongColumnStatsData::printTo(std::ostream& out) const {
@@ -8451,21 +8667,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other309) {
- maxColLen = other309.maxColLen;
- avgColLen = other309.avgColLen;
- numNulls = other309.numNulls;
- numDVs = other309.numDVs;
- bitVectors = other309.bitVectors;
- __isset = other309.__isset;
+StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other312) {
+ maxColLen = other312.maxColLen;
+ avgColLen = other312.avgColLen;
+ numNulls = other312.numNulls;
+ numDVs = other312.numDVs;
+ bitVectors = other312.bitVectors;
+ __isset = other312.__isset;
}
-StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other310) {
- maxColLen = other310.maxColLen;
- avgColLen = other310.avgColLen;
- numNulls = other310.numNulls;
- numDVs = other310.numDVs;
- bitVectors = other310.bitVectors;
- __isset = other310.__isset;
+StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other313) {
+ maxColLen = other313.maxColLen;
+ avgColLen = other313.avgColLen;
+ numNulls = other313.numNulls;
+ numDVs = other313.numDVs;
+ bitVectors = other313.bitVectors;
+ __isset = other313.__isset;
return *this;
}
void StringColumnStatsData::printTo(std::ostream& out) const {
@@ -8611,19 +8827,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other311) {
- maxColLen = other311.maxColLen;
- avgColLen = other311.avgColLen;
- numNulls = other311.numNulls;
- bitVectors = other311.bitVectors;
- __isset = other311.__isset;
+BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other314) {
+ maxColLen = other314.maxColLen;
+ avgColLen = other314.avgColLen;
+ numNulls = other314.numNulls;
+ bitVectors = other314.bitVectors;
+ __isset = other314.__isset;
}
-BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other312) {
- maxColLen = other312.maxColLen;
- avgColLen = other312.avgColLen;
- numNulls = other312.numNulls;
- bitVectors = other312.bitVectors;
- __isset = other312.__isset;
+BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other315) {
+ maxColLen = other315.maxColLen;
+ avgColLen = other315.avgColLen;
+ numNulls = other315.numNulls;
+ bitVectors = other315.bitVectors;
+ __isset = other315.__isset;
return *this;
}
void BinaryColumnStatsData::printTo(std::ostream& out) const {
@@ -8728,13 +8944,13 @@ void swap(Decimal &a, Decimal &b) {
swap(a.unscaled, b.unscaled);
}
-Decimal::Decimal(const Decimal& other313) {
- scale = other313.scale;
- unscaled = other313.unscaled;
+Decimal::Decimal(const Decimal& other316) {
+ scale = other316.scale;
+ unscaled = other316.unscaled;
}
-Decimal& Decimal::operator=(const Decimal& other314) {
- scale = other314.scale;
- unscaled = other314.unscaled;
+Decimal& Decimal::operator=(const Decimal& other317) {
+ scale = other317.scale;
+ unscaled = other317.unscaled;
return *this;
}
void Decimal::printTo(std::ostream& out) const {
@@ -8895,21 +9111,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other315) {
- lowValue = other315.lowValue;
- highValue = other315.highValue;
- numNulls = other315.numNulls;
- numDVs = other315.numDVs;
- bitVectors = other315.bitVectors;
- __isset = other315.__isset;
+DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other318) {
+ lowValue = other318.lowValue;
+ highValue = other318.highValue;
+ numNulls = other318.numNulls;
+ numDVs = other318.numDVs;
+ bitVectors = other318.bitVectors;
+ __isset = other318.__isset;
}
-DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other316) {
- lowValue = other316.lowValue;
- highValue = other316.highValue;
- numNulls = other316.numNulls;
- numDVs = other316.numDVs;
- bitVectors = other316.bitVectors;
- __isset = other316.__isset;
+DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other319) {
+ lowValue = other319.lowValue;
+ highValue = other319.highValue;
+ numNulls = other319.numNulls;
+ numDVs = other319.numDVs;
+ bitVectors = other319.bitVectors;
+ __isset = other319.__isset;
return *this;
}
void DecimalColumnStatsData::printTo(std::ostream& out) const {
@@ -8995,11 +9211,11 @@ void swap(Date &a, Date &b) {
swap(a.daysSinceEpoch, b.daysSinceEpoch);
}
-Date::Date(const Date& other317) {
- daysSinceEpoch = other317.daysSinceEpoch;
+Date::Date(const Date& other320) {
+ daysSinceEpoch = other320.daysSinceEpoch;
}
-Date& Date::operator=(const Date& other318) {
- daysSinceEpoch = other318.daysSinceEpoch;
+Date& Date::operator=(const Date& other321) {
+ daysSinceEpoch = other321.daysSinceEpoch;
return *this;
}
void Date::printTo(std::ostream& out) const {
@@ -9159,21 +9375,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other319) {
- lowValue = other319.lowValue;
- highValue = other319.highValue;
- numNulls = other319.numNulls;
- numDVs = other319.numDVs;
- bitVectors = other319.bitVectors;
- __isset = other319.__isset;
+DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other322) {
+ lowValue = other322.lowValue;
+ highValue = other322.highValue;
+ numNulls = other322.numNulls;
+ numDVs = other322.numDVs;
+ bitVectors = other322.bitVectors;
+ __isset = other322.__isset;
}
-DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other320) {
- lowValue = other320.lowValue;
- highValue = other320.highValue;
- numNulls = other320.numNulls;
- numDVs = other320.numDVs;
- bitVectors = other320.bitVectors;
- __isset = other320.__isset;
+DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other323) {
+ lowValue = other323.lowValue;
+ highValue = other323.highValue;
+ numNulls = other323.numNulls;
+ numDVs = other323.numDVs;
+ bitVectors = other323.bitVectors;
+ __isset = other323.__isset;
return *this;
}
void DateColumnStatsData::printTo(std::ostream& out) const {
@@ -9359,25 +9575,25 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) {
swap(a.__isset, b.__isset);
}
-ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other321) {
- booleanStats = other321.booleanStats;
- longStats = other321.longStats;
- doubleStats = other321.doubleStats;
- stringStats = other321.stringStats;
- binaryStats = other321.binaryStats;
- decimalStats = other321.decimalStats;
- dateStats = other321.dateStats;
- __isset = other321.__isset;
-}
-ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other322) {
- booleanStats = other322.booleanStats;
- longStats = other322.longStats;
- doubleStats = other322.doubleStats;
- stringStats = other322.stringStats;
- binaryStats = other322.binaryStats;
- decimalStats = other322.decimalStats;
- dateStats = other322.dateStats;
- __isset = other322.__isset;
+ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other324) {
+ booleanStats = other324.booleanStats;
+ longStats = other324.longStats;
+ doubleStats = other324.doubleStats;
+ stringStats = other324.stringStats;
+ binaryStats = other324.binaryStats;
+ decimalStats = other324.decimalStats;
+ dateStats = other324.dateStats;
+ __isset = other324.__isset;
+}
+ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other325) {
+ booleanStats = other325.booleanStats;
+ longStats = other325.longStats;
+ doubleStats = other325.doubleStats;
+ stringStats = other325.stringStats;
+ binaryStats = other325.binaryStats;
+ decimalStats = other325.decimalStats;
+ dateStats = other325.dateStats;
+ __isset = other325.__isset;
return *this;
}
void ColumnStatisticsData::printTo(std::ostream& out) const {
@@ -9505,15 +9721,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) {
swap(a.statsData, b.statsData);
}
-ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other323) {
- colName = other323.colName;
- colType = other323.colType;
- statsData = other323.statsData;
+ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other326) {
+ colName = other326.colName;
+ colType = other326.colType;
+ statsData = other326.statsData;
}
-ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other324) {
- colName = other324.colName;
- colType = other324.colType;
- statsData = other324.statsData;
+ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other327) {
+ colName = other327.colName;
+ colType = other327.colType;
+ statsData = other327.statsData;
return *this;
}
void ColumnStatisticsObj::printTo(std::ostream& out) const {
@@ -9695,23 +9911,23 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) {
swap(a.__isset, b.__isset);
}
-ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other325) {
- isTblLevel = other325.isTblLevel;
- dbName = other325.dbName;
- tableName = other325.tableName;
- partName = other325.partName;
- lastAnalyzed = other325.lastAnalyzed;
- catName = other325.catName;
- __isset = other325.__isset;
-}
-ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other326) {
- isTblLevel = other326.isTblLevel;
- dbName = other326.dbName;
- tableName = other326.tableName;
- partName = other326.partName;
- lastAnalyzed = other326.lastAnalyzed;
- catName = other326.catName;
- __isset = other326.__isset;
+ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other328) {
+ isTblLevel = other328.isTblLevel;
+ dbName = other328.dbName;
+ tableName = other328.tableName;
+ partName = other328.partName;
+ lastAnalyzed = other328.lastAnalyzed;
+ catName = other328.catName;
+ __isset = other328.__isset;
+}
+ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other329) {
+ isTblLevel = other329.isTblLevel;
+ dbName = other329.dbName;
+ tableName = other329.tableName;
+ partName = other329.partName;
+ lastAnalyzed = other329.lastAnalyzed;
+ catName = other329.catName;
+ __isset = other329.__isset;
return *this;
}
void ColumnStatisticsDesc::printTo(std::ostream& out) const {
@@ -9739,6 +9955,21 @@ void ColumnStatistics::__set_statsObj(const std::vector<ColumnStatisticsObj> & v
this->statsObj = val;
}
+void ColumnStatistics::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void ColumnStatistics::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void ColumnStatistics::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -9774,14 +10005,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->statsObj.clear();
- uint32_t _size327;
- ::apache::thrift::protocol::TType _etype330;
- xfer += iprot->readListBegin(_etype330, _size327);
- this->statsObj.resize(_size327);
- uint32_t _i331;
- for (_i331 = 0; _i331 < _size327; ++_i331)
+ uint32_t _size330;
+ ::apache::thrift::protocol::TType _etype333;
+ xfer += iprot->readListBegin(_etype333, _size330);
+ this->statsObj.resize(_size330);
+ uint32_t _i334;
+ for (_i334 = 0; _i334 < _size330; ++_i334)
{
- xfer += this->statsObj[_i331].read(iprot);
+ xfer += this->statsObj[_i334].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9790,6 +10021,32 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast335;
+ xfer += iprot->readI32(ecast335);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast335;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -9818,15 +10075,30 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c
xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->statsObj.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter332;
- for (_iter332 = this->statsObj.begin(); _iter332 != this->statsObj.end(); ++_iter332)
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter336;
+ for (_iter336 = this->statsObj.begin(); _iter336 != this->statsObj.end(); ++_iter336)
{
- xfer += (*_iter332).write(oprot);
+ xfer += (*_iter336).write(oprot);
}
xfer += oprot->writeListEnd();
}
xfer += oprot->writeFieldEnd();
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 3);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 5);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -9836,15 +10108,27 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) {
using ::std::swap;
swap(a.statsDesc, b.statsDesc);
swap(a.statsObj, b.statsObj);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
+ swap(a.__isset, b.__isset);
}
-ColumnStatistics::ColumnStatistics(const ColumnStatistics& other333) {
- statsDesc = other333.statsDesc;
- statsObj = other333.statsObj;
-}
-ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other334) {
- statsDesc = other334.statsDesc;
- statsObj = other334.statsObj;
+ColumnStatistics::ColumnStatistics(const ColumnStatistics& other337) {
+ statsDesc = other337.statsDesc;
+ statsObj = other337.statsObj;
+ txnId = other337.txnId;
+ validWriteIdList = other337.validWriteIdList;
+ isStatsCompliant = other337.isStatsCompliant;
+ __isset = other337.__isset;
+}
+ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other338) {
+ statsDesc = other338.statsDesc;
+ statsObj = other338.statsObj;
+ txnId = other338.txnId;
+ validWriteIdList = other338.validWriteIdList;
+ isStatsCompliant = other338.isStatsCompliant;
+ __isset = other338.__isset;
return *this;
}
void ColumnStatistics::printTo(std::ostream& out) const {
@@ -9852,6 +10136,9 @@ void ColumnStatistics::printTo(std::ostream& out) const {
out << "ColumnStatistics(";
out << "statsDesc=" << to_string(statsDesc);
out << ", " << "statsObj=" << to_string(statsObj);
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
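
Note that ColumnStatistics previously carried only required fields, so its generated swap(), copy constructor, and operator= did not touch __isset at all; the hunks above add those lines because the struct now has optional members. A minimal sketch, with a stand-in type, of why the presence bits must travel with the values:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Stand-in sketch: if swap() exchanged the values but not __isset, a struct
// whose txnId was never set could suddenly claim a set (and stale) value.
struct MiniColumnStatistics {
  struct Isset { bool txnId = false; } __isset;
  int64_t txnId = 0;
};

void swap(MiniColumnStatistics& a, MiniColumnStatistics& b) {
  using std::swap;
  swap(a.txnId, b.txnId);
  swap(a.__isset, b.__isset);  // presence bits travel with the values
}

int main() {
  MiniColumnStatistics withTxn, without;
  withTxn.txnId = 7;
  withTxn.__isset.txnId = true;
  swap(withTxn, without);
  assert(without.__isset.txnId && without.txnId == 7);
  assert(!withTxn.__isset.txnId);
  return 0;
}
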
@@ -9868,6 +10155,11 @@ void AggrStats::__set_partsFound(const int64_t val) {
this->partsFound = val;
}
+void AggrStats::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -9895,14 +10187,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->colStats.clear();
- uint32_t _size335;
- ::apache::thrift::protocol::TType _etype338;
- xfer += iprot->readListBegin(_etype338, _size335);
- this->colStats.resize(_size335);
- uint32_t _i339;
- for (_i339 = 0; _i339 < _size335; ++_i339)
+ uint32_t _size339;
+ ::apache::thrift::protocol::TType _etype342;
+ xfer += iprot->readListBegin(_etype342, _size339);
+ this->colStats.resize(_size339);
+ uint32_t _i343;
+ for (_i343 = 0; _i343 < _size339; ++_i343)
{
- xfer += this->colStats[_i339].read(iprot);
+ xfer += this->colStats[_i343].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9919,6 +10211,16 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast344;
+ xfer += iprot->readI32(ecast344);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast344;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -9943,10 +10245,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter340;
- for (_iter340 = this->colStats.begin(); _iter340 != this->colStats.end(); ++_iter340)
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter345;
+ for (_iter345 = this->colStats.begin(); _iter345 != this->colStats.end(); ++_iter345)
{
- xfer += (*_iter340).write(oprot);
+ xfer += (*_iter345).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9956,6 +10258,11 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeI64(this->partsFound);
xfer += oprot->writeFieldEnd();
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -9965,15 +10272,21 @@ void swap(AggrStats &a, AggrStats &b) {
using ::std::swap;
swap(a.colStats, b.colStats);
swap(a.partsFound, b.partsFound);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
+ swap(a.__isset, b.__isset);
}
-AggrStats::AggrStats(const AggrStats& other341) {
- colStats = other341.colStats;
- partsFound = other341.partsFound;
+AggrStats::AggrStats(const AggrStats& other346) {
+ colStats = other346.colStats;
+ partsFound = other346.partsFound;
+ isStatsCompliant = other346.isStatsCompliant;
+ __isset = other346.__isset;
}
-AggrStats& AggrStats::operator=(const AggrStats& other342) {
- colStats = other342.colStats;
- partsFound = other342.partsFound;
+AggrStats& AggrStats::operator=(const AggrStats& other347) {
+ colStats = other347.colStats;
+ partsFound = other347.partsFound;
+ isStatsCompliant = other347.isStatsCompliant;
+ __isset = other347.__isset;
return *this;
}
void AggrStats::printTo(std::ostream& out) const {
@@ -9981,6 +10294,7 @@ void AggrStats::printTo(std::ostream& out) const {
out << "AggrStats(";
out << "colStats=" << to_string(colStats);
out << ", " << "partsFound=" << to_string(partsFound);
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
@@ -9998,6 +10312,16 @@ void SetPartitionsStatsRequest::__set_needMerge(const bool val) {
__isset.needMerge = true;
}
+void SetPartitionsStatsRequest::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void SetPartitionsStatsRequest::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -10024,14 +10348,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->colStats.clear();
- uint32_t _size343;
- ::apache::thrift::protocol::TType _etype346;
- xfer += iprot->readListBegin(_etype346, _size343);
- this->colStats.resize(_size343);
- uint32_t _i347;
- for (_i347 = 0; _i347 < _size343; ++_i347)
+ uint32_t _size348;
+ ::apache::thrift::protocol::TType _etype351;
+ xfer += iprot->readListBegin(_etype351, _size348);
+ this->colStats.resize(_size348);
+ uint32_t _i352;
+ for (_i352 = 0; _i352 < _size348; ++_i352)
{
- xfer += this->colStats[_i347].read(iprot);
+ xfer += this->colStats[_i352].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10048,6 +10372,22 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
xfer += iprot->skip(ftype);
}
break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -10070,10 +10410,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
- std::vector<ColumnStatistics> ::const_iterator _iter348;
- for (_iter348 = this->colStats.begin(); _iter348 != this->colStats.end(); ++_iter348)
+ std::vector<ColumnStatistics> ::const_iterator _iter353;
+ for (_iter353 = this->colStats.begin(); _iter353 != this->colStats.end(); ++_iter353)
{
- xfer += (*_iter348).write(oprot);
+ xfer += (*_iter353).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10084,6 +10424,16 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeBool(this->needMerge);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 3);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -10093,18 +10443,24 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) {
using ::std::swap;
swap(a.colStats, b.colStats);
swap(a.needMerge, b.needMerge);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
swap(a.__isset, b.__isset);
}
-SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other349) {
- colStats = other349.colStats;
- needMerge = other349.needMerge;
- __isset = other349.__isset;
+SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other354) {
+ colStats = other354.colStats;
+ needMerge = other354.needMerge;
+ txnId = other354.txnId;
+ validWriteIdList = other354.validWriteIdList;
+ __isset = other354.__isset;
}
-SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other350) {
- colStats = other350.colStats;
- needMerge = other350.needMerge;
- __isset = other350.__isset;
+SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other355) {
+ colStats = other355.colStats;
+ needMerge = other355.needMerge;
+ txnId = other355.txnId;
+ validWriteIdList = other355.validWriteIdList;
+ __isset = other355.__isset;
return *this;
}
void SetPartitionsStatsRequest::printTo(std::ostream& out) const {
@@ -10112,6 +10468,8 @@ void SetPartitionsStatsRequest::printTo(std::ostream& out) const {
out << "SetPartitionsStatsRequest(";
out << "colStats=" << to_string(colStats);
out << ", " << "needMerge="; (__isset.needMerge ? (out << to_string(needMerge)) : (out << "<null>"));
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
out << ")";
}
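
A hedged usage sketch of the new SetPartitionsStatsRequest fields, going through the __set_* helpers this patch generates. The header name and namespace (hive_metastore_types.h, Apache::Hadoop::Hive) are assumptions about the generated code layout rather than something shown in these hunks, and the validWriteIdList string is illustrative only:

#include <iostream>
#include "hive_metastore_types.h"  // assumed name of the generated header

using Apache::Hadoop::Hive::ColumnStatistics;        // assumed generated namespace
using Apache::Hadoop::Hive::SetPartitionsStatsRequest;

int main() {
  SetPartitionsStatsRequest req;
  req.colStats.push_back(ColumnStatistics());  // stats objects would be filled in for real use
  req.__set_needMerge(true);
  req.__set_txnId(42);                         // field 3, added by this patch
  req.__set_validWriteIdList("default.t:5:9223372036854775807::");  // illustrative format
  req.printTo(std::cout);                      // unset optional fields render as <null>
  std::cout << std::endl;
  return 0;
}
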
@@ -10153,14 +10511,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->fieldSchemas.clear();
- uint32_t _size351;
- ::apache::thrift::protocol::TType _etype354;
- xfer += iprot->readListBegin(_etype354, _size351);
- this->fieldSchemas.resize(_size351);
- uint32_t _i355;
- for (_i355 = 0; _i355 < _size351; ++_i355)
+ uint32_t _size356;
+ ::apache::thrift::protocol::TType _etype359;
+ xfer += iprot->readListBegin(_etype359, _size356);
+ this->fieldSchemas.resize(_size356);
+ uint32_t _i360;
+ for (_i360 = 0; _i360 < _size356; ++_i360)
{
- xfer += this->fieldSchemas[_i355].read(iprot);
+ xfer += this->fieldSchemas[_i360].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10173,17 +10531,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->properties.clear();
- uint32_t _size356;
- ::apache::thrift::protocol::TType _ktype357;
- ::apache::thrift::protocol::TType _vtype358;
- xfer += iprot->readMapBegin(_ktype357, _vtype358, _size356);
- uint32_t _i360;
- for (_i360 = 0; _i360 < _size356; ++_i360)
+ uint32_t _size361;
+ ::apache::thrift::protocol::TType _ktype362;
+ ::apache::thrift::protocol::TType _vtype363;
+ xfer += iprot->readMapBegin(_ktype362, _vtype363, _size361);
+ uint32_t _i365;
+ for (_i365 = 0; _i365 < _size361; ++_i365)
{
- std::string _key361;
- xfer += iprot->readString(_key361);
- std::string& _val362 = this->properties[_key361];
- xfer += iprot->readString(_val362);
+ std::string _key366;
+ xfer += iprot->readString(_key366);
+ std::string& _val367 = this->properties[_key366];
+ xfer += iprot->readString(_val367);
}
xfer += iprot->readMapEnd();
}
@@ -10212,10 +10570,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fieldSchemas.size()));
- std::vector<FieldSchema> ::const_iterator _iter363;
- for (_iter363 = this->fieldSchemas.begin(); _iter363 != this->fieldSchemas.end(); ++_iter363)
+ std::vector<FieldSchema> ::const_iterator _iter368;
+ for (_iter368 = this->fieldSchemas.begin(); _iter368 != this->fieldSchemas.end(); ++_iter368)
{
- xfer += (*_iter363).write(oprot);
+ xfer += (*_iter368).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10224,11 +10582,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
- std::map<std::string, std::string> ::const_iterator _iter364;
- for (_iter364 = this->properties.begin(); _iter364 != this->properties.end(); ++_iter364)
+ std::map<std::string, std::string> ::const_iterator _iter369;
+ for (_iter369 = this->properties.begin(); _iter369 != this->properties.end(); ++_iter369)
{
- xfer += oprot->writeString(_iter364->first);
- xfer += oprot->writeString(_iter364->second);
+ xfer += oprot->writeString(_iter369->first);
+ xfer += oprot->writeString(_iter369->second);
}
xfer += oprot->writeMapEnd();
}
@@ -10246,15 +10604,15 @@ void swap(Schema &a, Schema &b) {
swap(a.__isset, b.__isset);
}
-Schema::Schema(const Schema& other365) {
- fieldSchemas = other365.fieldSchemas;
- properties = other365.properties;
- __isset = other365.__isset;
+Schema::Schema(const Schema& other370) {
+ fieldSchemas = other370.fieldSchemas;
+ properties = other370.properties;
+ __isset = other370.__isset;
}
-Schema& Schema::operator=(const Schema& other366) {
- fieldSchemas = other366.fieldSchemas;
- properties = other366.properties;
- __isset = other366.__isset;
+Schema& Schema::operator=(const Schema& other371) {
+ fieldSchemas = other371.fieldSchemas;
+ properties = other371.properties;
+ __isset = other371.__isset;
return *this;
}
void Schema::printTo(std::ostream& out) const {
@@ -10299,17 +10657,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->properties.clear();
- uint32_t _size367;
- ::apache::thrift::protocol::TType _ktype368;
- ::apache::thrift::protocol::TType _vtype369;
- xfer += iprot->readMapBegin(_ktype368, _vtype369, _size367);
- uint32_t _i371;
- for (_i371 = 0; _i371 < _size367; ++_i371)
+ uint32_t _size372;
+ ::apache::thrift::protocol::TType _ktype373;
+ ::apache::thrift::protocol::TType _vtype374;
+ xfer += iprot->readMapBegin(_ktype373, _vtype374, _size372);
+ uint32_t _i376;
+ for (_i376 = 0; _i376 < _size372; ++_i376)
{
- std::string _key372;
- xfer += iprot->readString(_key372);
- std::string& _val373 = this->properties[_key372];
- xfer += iprot->readString(_val373);
+ std::string _key377;
+ xfer += iprot->readString(_key377);
+ std::string& _val378 = this->properties[_key377];
+ xfer += iprot->readString(_val378);
}
xfer += iprot->readMapEnd();
}
@@ -10338,11 +10696,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
- std::map<std::string, std::string> ::const_iterator _iter374;
- for (_iter374 = this->properties.begin(); _iter374 != this->properties.end(); ++_iter374)
+ std::map<std::string, std::string> ::const_iterator _iter379;
+ for (_iter379 = this->properties.begin(); _iter379 != this->properties.end(); ++_iter379)
{
- xfer += oprot->writeString(_iter374->first);
- xfer += oprot->writeString(_iter374->second);
+ xfer += oprot->writeString(_iter379->first);
+ xfer += oprot->writeString(_iter379->second);
}
xfer += oprot->writeMapEnd();
}
@@ -10359,13 +10717,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) {
swap(a.__isset, b.__isset);
}
-EnvironmentContext::EnvironmentContext(const EnvironmentContext& other375) {
- properties = other375.properties;
- __isset = other375.__isset;
+EnvironmentContext::EnvironmentContext(const EnvironmentContext& other380) {
+ properties = other380.properties;
+ __isset = other380.__isset;
}
-EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other376) {
- properties = other376.properties;
- __isset = other376.__isset;
+EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other381) {
+ properties = other381.properties;
+ __isset = other381.__isset;
return *this;
}
void EnvironmentContext::printTo(std::ostream& out) const {
@@ -10487,17 +10845,17 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) {
swap(a.__isset, b.__isset);
}
-PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other377) {
- db_name = other377.db_name;
- tbl_name = other377.tbl_name;
- catName = other377.catName;
- __isset = other377.__isset;
+PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other382) {
+ db_name = other382.db_name;
+ tbl_name = other382.tbl_name;
+ catName = other382.catName;
+ __isset = other382.__isset;
}
-PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other378) {
- db_name = other378.db_name;
- tbl_name = other378.tbl_name;
- catName = other378.catName;
- __isset = other378.__isset;
+PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other383) {
+ db_name = other383.db_name;
+ tbl_name = other383.tbl_name;
+ catName = other383.catName;
+ __isset = other383.__isset;
return *this;
}
void PrimaryKeysRequest::printTo(std::ostream& out) const {
@@ -10544,14 +10902,14 @@ uint32_t PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeys.clear();
- uint32_t _size379;
- ::apache::thrift::protocol::TType _etype382;
- xfer += iprot->readListBegin(_etype382, _size379);
- this->primaryKeys.resize(_size379);
- uint32_t _i383;
- for (_i383 = 0; _i383 < _size379; ++_i383)
+ uint32_t _size384;
+ ::apache::thrift::protocol::TType _etype387;
+ xfer += iprot->readListBegin(_etype387, _size384);
+ this->primaryKeys.resize(_size384);
+ uint32_t _i388;
+ for (_i388 = 0; _i388 < _size384; ++_i388)
{
- xfer += this->primaryKeys[_i383].read(iprot);
+ xfer += this->primaryKeys[_i388].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10582,10 +10940,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter384;
- for (_iter384 = this->primaryKeys.begin(); _iter384 != this->primaryKeys.end(); ++_iter384)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter389;
+ for (_iter389 = this->primaryKeys.begin(); _iter389 != this->primaryKeys.end(); ++_iter389)
{
- xfer += (*_iter384).write(oprot);
+ xfer += (*_iter389).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10601,11 +10959,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) {
swap(a.primaryKeys, b.primaryKeys);
}
-PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other385) {
- primaryKeys = other385.primaryKeys;
+PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other390) {
+ primaryKeys = other390.primaryKeys;
}
-PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other386) {
- primaryKeys = other386.primaryKeys;
+PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other391) {
+ primaryKeys = other391.primaryKeys;
return *this;
}
void PrimaryKeysResponse::printTo(std::ostream& out) const {
@@ -10755,21 +11113,21 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) {
swap(a.__isset, b.__isset);
}
-ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other387) {
- parent_db_name = other387.parent_db_name;
- parent_tbl_name = other387.parent_tbl_name;
- foreign_db_name = other387.foreign_db_name;
- foreign_tbl_name = other387.foreign_tbl_name;
- catName = other387.catName;
- __isset = other387.__isset;
-}
-ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other388) {
- parent_db_name = other388.parent_db_name;
- parent_tbl_name = other388.parent_tbl_name;
- foreign_db_name = other388.foreign_db_name;
- foreign_tbl_name = other388.foreign_tbl_name;
- catName = other388.catName;
- __isset = other388.__isset;
+ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other392) {
+ parent_db_name = other392.parent_db_name;
+ parent_tbl_name = other392.parent_tbl_name;
+ foreign_db_name = other392.foreign_db_name;
+ foreign_tbl_name = other392.foreign_tbl_name;
+ catName = other392.catName;
+ __isset = other392.__isset;
+}
+ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other393) {
+ parent_db_name = other393.parent_db_name;
+ parent_tbl_name = other393.parent_tbl_name;
+ foreign_db_name = other393.foreign_db_name;
+ foreign_tbl_name = other393.foreign_tbl_name;
+ catName = other393.catName;
+ __isset = other393.__isset;
return *this;
}
void ForeignKeysRequest::printTo(std::ostream& out) const {
@@ -10818,14 +11176,14 @@ uint32_t ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeys.clear();
- uint32_t _size389;
- ::apache::thrift::protocol::TType _etype392;
- xfer += iprot->readListBegin(_etype392, _size389);
- this->foreignKeys.resize(_size389);
- uint32_t _i393;
- for (_i393 = 0; _i393 < _size389; ++_i393)
+ uint32_t _size394;
+ ::apache::thrift::protocol::TType _etype397;
+ xfer += iprot->readListBegin(_etype397, _size394);
+ this->foreignKeys.resize(_size394);
+ uint32_t _i398;
+ for (_i398 = 0; _i398 < _size394; ++_i398)
{
- xfer += this->foreignKeys[_i393].read(iprot);
+ xfer += this->foreignKeys[_i398].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10856,10 +11214,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter394;
- for (_iter394 = this->foreignKeys.begin(); _iter394 != this->foreignKeys.end(); ++_iter394)
+ std::vector<SQLForeignKey> ::const_iterator _iter399;
+ for (_iter399 = this->foreignKeys.begin(); _iter399 != this->foreignKeys.end(); ++_iter399)
{
- xfer += (*_iter394).write(oprot);
+ xfer += (*_iter399).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10875,11 +11233,11 @@ void swap(ForeignKeysResponse &a, ForeignKeysResponse &b) {
swap(a.foreignKeys, b.foreignKeys);
}
-ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other395) {
- foreignKeys = other395.foreignKeys;
+ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other400) {
+ foreignKeys = other400.foreignKeys;
}
-ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other396) {
- foreignKeys = other396.foreignKeys;
+ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other401) {
+ foreignKeys = other401.foreignKeys;
return *this;
}
void ForeignKeysResponse::printTo(std::ostream& out) const {
@@ -11001,15 +11359,15 @@ void swap(UniqueConstraintsRequest &a, UniqueConstraintsRequest &b) {
swap(a.tbl_name, b.tbl_name);
}
-UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other397) {
- catName = other397.catName;
- db_name = other397.db_name;
- tbl_name = other397.tbl_name;
+UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other402) {
+ catName = other402.catName;
+ db_name = other402.db_name;
+ tbl_name = other402.tbl_name;
}
-UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other398) {
- catName = other398.catName;
- db_name = other398.db_name;
- tbl_name = other398.tbl_name;
+UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other403) {
+ catName = other403.catName;
+ db_name = other403.db_name;
+ tbl_name = other403.tbl_name;
return *this;
}
void UniqueConstraintsRequest::printTo(std::ostream& out) const {
@@ -11056,14 +11414,14 @@ uint32_t UniqueConstraintsResponse::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->uniqueConstraints.clear();
- uint32_t _size399;
- ::apache::thrift::protocol::TType _etype402;
- xfer += iprot->readListBegin(_etype402, _size399);
- this->uniqueConstraints.resize(_size399);
- uint32_t _i403;
- for (_i403 = 0; _i403 < _size399; ++_i403)
+ uint32_t _size404;
+ ::apache::thrift::protocol::TType _etype407;
+ xfer += iprot->readListBegin(_etype407, _size404);
+ this->uniqueConstraints.resize(_size404);
+ uint32_t _i408;
+ for (_i408 = 0; _i408 < _size404; ++_i408)
{
- xfer += this->uniqueConstraints[_i403].read(iprot);
+ xfer += this->uniqueConstraints[_i408].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -11094,10 +11452,10 @@ uint32_t UniqueConstraintsResponse::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter404;
- for (_iter404 = this->uniqueConstraints.begin(); _iter404 != this->uniqueConstraints.end(); ++_iter404)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter409;
+ for (_iter409 = this->uniqueConstraints.begin(); _iter409 != this->uniqueConstraints.end(); ++_iter409)
{
- xfer += (*_iter404).write(oprot);
+ xfer += (*_iter409).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11113,11 +11471,11 @@ void swap(UniqueConstraintsResponse &a, UniqueConstraintsResponse &b) {
swap(a.uniqueConstraints, b.uniqueConstraints);
}
-UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other405) {
- uniqueConstraints = other405.uniqueConstraints;
+UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other410) {
+ uniqueConstraints = other410.uniqueConstraints;
}
-UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other406) {
- uniqueConstraints = other406.uniqueConstraints;
+UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other411) {
+ uniqueConstraints = other411.uniqueConstraints;
return *this;
}
void UniqueConstraintsResponse::printTo(std::ostream& out) const {
@@ -11239,15 +11597,15 @@ void swap(NotNullConstraintsRequest &a, NotNullConstraintsRequest &b) {
swap(a.tbl_name, b.tbl_name);
}
-NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other407) {
- catName = other407.catName;
- db_name = other407.db_name;
- tbl_name = other407.tbl_name;
+NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other412) {
+ catName = other412.catName;
+ db_name = other412.db_name;
+ tbl_name = other412.tbl_name;
}
-NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other408) {
- catName = other408.catName;
- db_name = other408.db_name;
- tbl_name = other408.tbl_name;
+NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other413) {
+ catName = other413.catName;
+ db_name = other413.db_name;
+ tbl_name = other413.tbl_name;
return *this;
}
void NotNullConstraintsRequest::printTo(std::ostream& out) const {
@@ -11294,14 +11652,14 @@ uint32_t NotNullConstraintsResponse::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->notNullConstraints.clear();
- uint32_t _size409;
- ::apache::thrift::protocol::TType _etype412;
- xfer += iprot->readListBegin(_etype412, _size409);
- this->notNullConstraints.resize(_size409);
- uint32_t _i413;
- for (_i413 = 0; _i413 < _size409; ++_i413)
+ uint32_t _size414;
+ ::apache::thrift::protocol::TType _etype417;
+ xfer += iprot->readListBegin(_etype417, _size414);
+ this->notNullConstraints.resize(_size414);
+ uint32_t _i418;
+ for (_i418 = 0; _i418 < _size414; ++_i418)
{
- xfer += this->notNullConstraints[_i413].read(iprot);
+ xfer += this->notNullConstraints[_i418].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -11332,10 +11690,10 @@ uint32_t NotNullConstraintsResponse::write(::apache::thrift::protocol::TProtocol
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter414;
- for (_iter414 = this->notNullConstraints.begin(); _iter414 != this->notNullConstraints.end(); ++_iter414)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter419;
+ for (_iter419 = this->notNullConstraints.begin(); _iter419 != this->notNullConstraints.end(); ++_iter419)
{
- xfer += (*_iter414).write(oprot);
+ xfer += (*_iter419).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11351,11 +11709,11 @@ void swap(NotNullConstraintsResponse &a, NotNullConstraintsResponse &b) {
swap(a.notNullConstraints, b.notNullConstraints);
}
-NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other415) {
- notNullConstraints = other415.notNullConstraints;
+NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other420) {
+ notNullConstraints = other420.notNullConstraints;
}
-NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other416) {
- notNullConstraints = other416.notNullConstraints;
+NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other421) {
+ notNullConstraints = other421.notNullConstraints;
return *this;
}
void NotNullConstraintsResponse::printTo(std::
<TRUNCATED>
[03/13] hive git commit: HIVE-19532: 03 patch
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index fc640d0..90aded5 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -141,6 +141,14 @@ module SchemaVersionState
VALID_VALUES = Set.new([INITIATED, START_REVIEW, CHANGES_REQUIRED, REVIEWED, ENABLED, DISABLED, ARCHIVED, DELETED]).freeze
end
+module IsolationLevelCompliance
+ YES = 1
+ NO = 2
+ UNKNOWN = 3
+ VALUE_MAP = {1 => "YES", 2 => "NO", 3 => "UNKNOWN"}
+ VALID_VALUES = Set.new([YES, NO, UNKNOWN]).freeze
+end
+
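The new IsolationLevelCompliance enum evidently backs the isStatsCompliant fields added to the result structs below, reporting whether returned statistics are consistent with the caller's snapshot (UNKNOWN presumably covering data that predates the feature). A minimal consumer sketch in Java, assuming the generated Java enum and accessor mirror this Ruby definition (getIsStatsCompliant() is an assumed generated name):

  GetTableResult res = client.get_table_req(req);
  // If the server says the stored stats do not match our snapshot,
  // treat them as stale rather than trusting them.
  if (res.getIsStatsCompliant() == IsolationLevelCompliance.NO) {
    // recompute or ignore the statistics
  }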
module FunctionType
JAVA = 1
VALUE_MAP = {1 => "JAVA"}
@@ -1062,6 +1070,9 @@ class Table
CREATIONMETADATA = 16
CATNAME = 17
OWNERTYPE = 18
+ TXNID = 19
+ VALIDWRITEIDLIST = 20
+ ISSTATSCOMPLIANT = 21
FIELDS = {
TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
@@ -1081,7 +1092,10 @@ class Table
REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true},
CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true},
CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
- OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => ::PrincipalType}
+ OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => ::PrincipalType},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
@@ -1090,6 +1104,9 @@ class Table
unless @ownerType.nil? || ::PrincipalType::VALID_VALUES.include?(@ownerType)
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field ownerType!')
end
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1106,6 +1123,9 @@ class Partition
PARAMETERS = 7
PRIVILEGES = 8
CATNAME = 9
+ TXNID = 10
+ VALIDWRITEIDLIST = 11
+ ISSTATSCOMPLIANT = 12
FIELDS = {
VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}},
@@ -1116,12 +1136,18 @@ class Partition
SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor},
PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1195,6 +1221,9 @@ class PartitionSpec
SHAREDSDPARTITIONSPEC = 4
PARTITIONLIST = 5
CATNAME = 6
+ TXNID = 7
+ VALIDWRITEIDLIST = 8
+ ISSTATSCOMPLIANT = 9
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
@@ -1202,12 +1231,18 @@ class PartitionSpec
ROOTPATH => {:type => ::Thrift::Types::STRING, :name => 'rootPath'},
SHAREDSDPARTITIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'sharedSDPartitionSpec', :class => ::PartitionSpecWithSharedSD, :optional => true},
PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1547,10 +1582,16 @@ class ColumnStatistics
include ::Thrift::Struct, ::Thrift::Struct_Union
STATSDESC = 1
STATSOBJ = 2
+ TXNID = 3
+ VALIDWRITEIDLIST = 4
+ ISSTATSCOMPLIANT = 5
FIELDS = {
STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc},
- STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}
+ STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
@@ -1558,6 +1599,9 @@ class ColumnStatistics
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsDesc is unset!') unless @statsDesc
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsObj is unset!') unless @statsObj
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1567,10 +1611,12 @@ class AggrStats
include ::Thrift::Struct, ::Thrift::Struct_Union
COLSTATS = 1
PARTSFOUND = 2
+ ISSTATSCOMPLIANT = 3
FIELDS = {
COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
- PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'}
+ PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
@@ -1578,6 +1624,9 @@ class AggrStats
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partsFound is unset!') unless @partsFound
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1587,10 +1636,14 @@ class SetPartitionsStatsRequest
include ::Thrift::Struct, ::Thrift::Struct_Union
COLSTATS = 1
NEEDMERGE = 2
+ TXNID = 3
+ VALIDWRITEIDLIST = 4
FIELDS = {
COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}},
- NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true}
+ NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
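SetPartitionsStatsRequest now carries the writer's snapshot alongside the stats themselves. A sketch of populating it, assuming the generated Java struct mirrors the Ruby fields above:

  List<ColumnStatistics> colStatsList = ...;   // stats computed by the writer
  SetPartitionsStatsRequest rqst = new SetPartitionsStatsRequest(colStatsList);
  rqst.setNeedMerge(true);                     // optional server-side merge
  rqst.setTxnId(txnId);                        // -1 means "not in a transaction"
  rqst.setValidWriteIdList(writeIdList);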
@@ -2055,15 +2108,20 @@ end
class TableStatsResult
include ::Thrift::Struct, ::Thrift::Struct_Union
TABLESTATS = 1
+ ISSTATSCOMPLIANT = 2
FIELDS = {
- TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}
+ TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableStats is unset!') unless @tableStats
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -2072,15 +2130,20 @@ end
class PartitionsStatsResult
include ::Thrift::Struct, ::Thrift::Struct_Union
PARTSTATS = 1
+ ISSTATSCOMPLIANT = 2
FIELDS = {
- PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}}
+ PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partStats is unset!') unless @partStats
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -2092,12 +2155,16 @@ class TableStatsRequest
TBLNAME = 2
COLNAMES = 3
CATNAME = 4
+ TXNID = 5
+ VALIDWRITEIDLIST = 6
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -2118,13 +2185,17 @@ class PartitionsStatsRequest
COLNAMES = 3
PARTNAMES = 4
CATNAME = 5
+ TXNID = 6
+ VALIDWRITEIDLIST = 7
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -2142,14 +2213,19 @@ end
class AddPartitionsResult
include ::Thrift::Struct, ::Thrift::Struct_Union
PARTITIONS = 1
+ ISSTATSCOMPLIANT = 2
FIELDS = {
- PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true}
+ PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -2163,6 +2239,8 @@ class AddPartitionsRequest
IFNOTEXISTS = 4
NEEDRESULT = 5
CATNAME = 6
+ TXNID = 7
+ VALIDWRITEIDLIST = 8
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
@@ -2170,7 +2248,9 @@ class AddPartitionsRequest
PARTS => {:type => ::Thrift::Types::LIST, :name => 'parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
IFNOTEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifNotExists'},
NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -3731,12 +3811,16 @@ class GetTableRequest
TBLNAME = 2
CAPABILITIES = 3
CATNAME = 4
+ TXNID = 5
+ VALIDWRITEIDLIST = 6
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -3752,15 +3836,20 @@ end
class GetTableResult
include ::Thrift::Struct, ::Thrift::Struct_Union
TABLE = 1
+ ISSTATSCOMPLIANT = 2
FIELDS = {
- TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table}
+ TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field table is unset!') unless @table
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index bbf3f12..66e4a63 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -1416,13 +1416,13 @@ module ThriftHiveMetastore
return
end
- def alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
- send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
+ def alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context, txnId, writeIdList)
+ send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context, txnId, writeIdList)
recv_alter_partitions_with_environment_context()
end
- def send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
- send_message('alter_partitions_with_environment_context', Alter_partitions_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :new_parts => new_parts, :environment_context => environment_context)
+ def send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context, txnId, writeIdList)
+ send_message('alter_partitions_with_environment_context', Alter_partitions_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :new_parts => new_parts, :environment_context => environment_context, :txnId => txnId, :writeIdList => writeIdList)
end
def recv_alter_partitions_with_environment_context()
@@ -4580,7 +4580,7 @@ module ThriftHiveMetastore
args = read_args(iprot, Alter_partitions_with_environment_context_args)
result = Alter_partitions_with_environment_context_result.new()
begin
- @handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context)
+ @handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context, args.txnId, args.writeIdList)
rescue ::InvalidOperationException => o1
result.o1 = o1
rescue ::MetaException => o2
@@ -9276,12 +9276,16 @@ module ThriftHiveMetastore
TBL_NAME = 2
NEW_PARTS = 3
ENVIRONMENT_CONTEXT = 4
+ TXNID = 5
+ WRITEIDLIST = 6
FIELDS = {
DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
- ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}
+ ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId'},
+ WRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'writeIdList'}
}
def struct_fields; FIELDS; end
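Note that txnId and writeIdList are added to the args struct (and to the client stubs above) as plain positional parameters rather than optional fields, so any code calling alter_partitions_with_environment_context directly has to be updated together with the IDL.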
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 050dca9..010870d 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -197,6 +197,6 @@ public interface AlterHandler extends Configurable {
*/
List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
final String dbname, final String name, final List<Partition> new_parts,
- EnvironmentContext environmentContext,IHMSHandler handler)
+ EnvironmentContext environmentContext, long txnId, String writeIdList, IHMSHandler handler)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
}
\ No newline at end of file
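The extra txnId/writeIdList pair on alterPartitions simply threads the caller's transaction down to the RawStore. A call-site sketch under the new signature (variable names assumed), matching how HiveMetaStore invokes it below:

  List<Partition> oldParts = alterHandler.alterPartitions(msdb, wh, catName,
      dbName, tblName, newParts, envCtx, txnId, writeIdList, handler);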
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 33999d0..3a2fda0 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -152,7 +152,7 @@ public class HiveAlterHandler implements AlterHandler {
// check if table with the new name already exists
if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
- if (msdb.getTable(catName, newDbName, newTblName) != null) {
+ if (msdb.getTable(catName, newDbName, newTblName, -1, null) != null) {
throw new InvalidOperationException("new table " + newDbName
+ "." + newTblName + " already exists");
}
@@ -161,7 +161,7 @@ public class HiveAlterHandler implements AlterHandler {
msdb.openTransaction();
// get old table
- oldt = msdb.getTable(catName, dbname, name);
+ oldt = msdb.getTable(catName, dbname, name, -1, null);
if (oldt == null) {
throw new InvalidOperationException("table " +
TableName.getQualified(catName, dbname, name) + " doesn't exist");
@@ -296,7 +296,8 @@ public class HiveAlterHandler implements AlterHandler {
for (Partition part : partBatch) {
partValues.add(part.getValues());
}
- msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch);
+ msdb.alterPartitions(
+ catName, newDbName, newTblName, partValues, partBatch, -1, null);
}
}
@@ -509,7 +510,7 @@ public class HiveAlterHandler implements AlterHandler {
try {
msdb.openTransaction();
- Table tbl = msdb.getTable(catName, dbname, name);
+ Table tbl = msdb.getTable(catName, dbname, name, -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to alter partition because table or database does not exist.");
@@ -565,7 +566,7 @@ public class HiveAlterHandler implements AlterHandler {
Database db;
try {
msdb.openTransaction();
- Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name);
+ Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to alter partition because table or database does not exist.");
@@ -714,14 +715,15 @@ public class HiveAlterHandler implements AlterHandler {
EnvironmentContext environmentContext)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts,
- environmentContext, null);
+ environmentContext, -1, null, null);
}
@Override
public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
final String dbname, final String name,
final List<Partition> new_parts,
- EnvironmentContext environmentContext, IHMSHandler handler)
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList, IHMSHandler handler)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
List<Partition> oldParts = new ArrayList<>();
List<List<String>> partValsList = new ArrayList<>();
@@ -734,7 +736,7 @@ public class HiveAlterHandler implements AlterHandler {
try {
msdb.openTransaction();
- Table tbl = msdb.getTable(catName, dbname, name);
+ Table tbl = msdb.getTable(catName, dbname, name, -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to alter partitions because table or database does not exist.");
@@ -769,7 +771,7 @@ public class HiveAlterHandler implements AlterHandler {
}
}
- msdb.alterPartitions(catName, dbname, name, partValsList, new_parts);
+ msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, txnId, writeIdList);
Iterator<Partition> oldPartsIt = oldParts.iterator();
for (Partition newPart : new_parts) {
Partition oldPart;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 9241e29..f7b11c8 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2428,7 +2428,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
private boolean is_table_exists(RawStore ms, String catName, String dbname, String name)
throws MetaException {
- return (ms.getTable(catName, dbname, name) != null);
+ return (ms.getTable(catName, dbname, name, -1, null) != null);
}
private boolean drop_table_core(final RawStore ms, final String catName, final String dbname,
@@ -2590,7 +2590,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
tableDnsPath = wh.getDnsPath(tablePath);
}
List<Path> partPaths = new ArrayList<>();
- Table tbl = ms.getTable(catName, dbName, tableName);
+ Table tbl = ms.getTable(catName, dbName, tableName, -1, null);
// call dropPartition on each of the table's partitions to follow the
// procedure for cleanly dropping partitions.
@@ -2833,7 +2833,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
public Table get_table(final String dbname, final String name) throws MetaException,
NoSuchObjectException {
String[] parsedDbName = parseDbName(dbname, conf);
- return getTableInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null);
+ return getTableInternal(
+ parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, -1, null);
}
@Override
@@ -2841,11 +2842,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
NoSuchObjectException {
String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(),
- req.getCapabilities()));
+ req.getCapabilities(), req.getTxnId(), req.getValidWriteIdList()));
}
private Table getTableInternal(String catName, String dbname, String name,
- ClientCapabilities capabilities) throws MetaException, NoSuchObjectException {
+ ClientCapabilities capabilities, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
if (isInTest) {
assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY,
"Hive tests", "get_table_req");
@@ -2855,7 +2857,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
startTableFunction("get_table", catName, dbname, name);
Exception ex = null;
try {
- t = get_table_core(catName, dbname, name);
+ t = get_table_core(catName, dbname, name, txnId, writeIdList);
if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) {
assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES,
"insert-only tables", "get_table_req");
@@ -2890,11 +2892,25 @@ public class HiveMetaStore extends ThriftHiveMetastore {
}
@Override
- public Table get_table_core(final String catName, final String dbname, final String name)
+ public Table get_table_core(
+ final String catName,
+ final String dbname,
+ final String name)
+ throws MetaException, NoSuchObjectException {
+ return get_table_core(catName, dbname, name, -1, null);
+ }
+
+ @Override
+ public Table get_table_core(
+ final String catName,
+ final String dbname,
+ final String name,
+ final long txnId,
+ final String writeIdList)
throws MetaException, NoSuchObjectException {
Table t = null;
try {
- t = getMS().getTable(catName, dbname, name);
+ t = getMS().getTable(catName, dbname, name, txnId, writeIdList);
if (t == null) {
throw new NoSuchObjectException(TableName.getQualified(catName, dbname, name) +
" table not found");
@@ -3076,7 +3092,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern);
- tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName());
+ tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to add partition because table or database do not exist");
@@ -3270,7 +3286,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
try {
ms.openTransaction();
- tbl = ms.getTable(catName, dbName, tblName);
+ tbl = ms.getTable(catName, dbName, tblName, -1, null);
if (tbl == null) {
throw new InvalidObjectException("Unable to add partitions because "
+ TableName.getQualified(catName, dbName, tblName) +
@@ -3541,7 +3557,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
Database db = null;
try {
ms.openTransaction();
- tbl = ms.getTable(catName, dbName, tblName);
+ tbl = ms.getTable(catName, dbName, tblName, -1, null);
if (tbl == null) {
throw new InvalidObjectException("Unable to add partitions because "
+ "database or table " + dbName + "." + tblName + " does not exist");
@@ -3795,7 +3811,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
}
try {
ms.openTransaction();
- tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName());
+ tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to add partition because table or database do not exist");
@@ -3929,14 +3945,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
ms.openTransaction();
Table destinationTable =
- ms.getTable(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName);
+ ms.getTable(
+ parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName, -1, null);
if (destinationTable == null) {
throw new MetaException( "The destination table " +
TableName.getQualified(parsedDestDbName[CAT_NAME],
parsedDestDbName[DB_NAME], destTableName) + " not found");
}
Table sourceTable =
- ms.getTable(parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName);
+ ms.getTable(
+ parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName, -1, null);
if (sourceTable == null) {
throw new MetaException("The source table " +
TableName.getQualified(parsedSourceDbName[CAT_NAME],
@@ -4111,7 +4129,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
try {
ms.openTransaction();
part = ms.getPartition(catName, db_name, tbl_name, part_vals);
- tbl = get_table_core(catName, db_name, tbl_name);
+ tbl = get_table_core(catName, db_name, tbl_name, -1, null);
isExternalTbl = isExternal(tbl);
firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
mustPurge = isMustPurge(envContext, tbl);
@@ -4839,7 +4857,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
Table table = null;
if (!listeners.isEmpty()) {
if (table == null) {
- table = getMS().getTable(catName, db_name, tbl_name);
+ table = getMS().getTable(catName, db_name, tbl_name, -1, null);
}
MetaStoreListenerNotifier.notifyEvent(listeners,
@@ -4868,12 +4886,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
public void alter_partitions(final String db_name, final String tbl_name,
final List<Partition> new_parts)
throws TException {
- alter_partitions_with_environment_context(db_name, tbl_name, new_parts, null);
+ alter_partitions_with_environment_context(
+ db_name, tbl_name, new_parts, null, -1, null);
}
@Override
public void alter_partitions_with_environment_context(final String db_name, final String tbl_name,
- final List<Partition> new_parts, EnvironmentContext environmentContext)
+ final List<Partition> new_parts, EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
throws TException {
String[] parsedDbName = parseDbName(db_name, conf);
@@ -4897,7 +4917,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this));
}
oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME],
- parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, this);
+ parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, txnId, writeIdList, this);
Iterator<Partition> olditr = oldParts.iterator();
// Only fetch the table if we have a listener that needs it.
Table table = null;
@@ -4911,7 +4931,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
}
if (table == null) {
- table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+ table = getMS().getTable(
+ parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, -1, null);
}
if (!listeners.isEmpty()) {
@@ -5336,7 +5357,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
private List<String> getPartValsFromName(RawStore ms, String catName, String dbName,
String tblName, String partName)
throws MetaException, InvalidObjectException {
- Table t = ms.getTable(catName, dbName, tblName);
+ Table t = ms.getTable(catName, dbName, tblName, -1, null);
if (t == null) {
throw new InvalidObjectException(dbName + "." + tblName
+ " table not found");
@@ -5591,7 +5612,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
ColumnStatistics statsObj = null;
try {
statsObj = getMS().getTableColumnStatistics(
- parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName));
+ parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName),
+ -1, null);
if (statsObj != null) {
assert statsObj.getStatsObjSize() <= 1;
}
@@ -5615,7 +5637,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
lowerCaseColNames.add(colName.toLowerCase());
}
try {
- ColumnStatistics cs = getMS().getTableColumnStatistics(catName, dbName, tblName, lowerCaseColNames);
+ ColumnStatistics cs = getMS().getTableColumnStatistics(
+ catName, dbName, tblName, lowerCaseColNames,
+ request.getTxnId(), request.getValidWriteIdList());
result = new TableStatsResult((cs == null || cs.getStatsObj() == null)
? Lists.newArrayList() : cs.getStatsObj());
} finally {
@@ -7322,8 +7346,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
AggrStats aggrStats = null;
try {
- aggrStats = new AggrStats(getMS().get_aggr_stats_for(catName, dbName, tblName,
- lowerCasePartNames, lowerCaseColNames));
+ AggrStats tmpAggrStats = getMS().get_aggr_stats_for(catName, dbName, tblName,
+ lowerCasePartNames, lowerCaseColNames, request.getTxnId(),
+ request.getValidWriteIdList());
+ if (tmpAggrStats != null ) {
+ aggrStats = new AggrStats(tmpAggrStats);
+ }
return aggrStats;
} finally {
endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName());
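The null guard above is presumably so a RawStore implementation can return null to signal that no aggregate stats are valid for the requested snapshot, in which case the handler now propagates null instead of wrapping an empty result.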
@@ -7357,7 +7385,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
} else {
if (request.isSetNeedMerge() && request.isNeedMerge()) {
// one single call to get all column stats
- ColumnStatistics csOld = getMS().getTableColumnStatistics(catName, dbName, tableName, colNames);
+ ColumnStatistics csOld =
+ getMS().getTableColumnStatistics(
+ catName, dbName, tableName, colNames,
+ request.getTxnId(), request.getValidWriteIdList());
Table t = getTable(catName, dbName, tableName);
// we first use t.getParameters() to prune the stats
MetaStoreUtils.getMergableCols(firstColStats, t.getParameters());
@@ -7397,8 +7428,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
// a single call to get all column stats for all partitions
List<String> partitionNames = new ArrayList<>();
partitionNames.addAll(newStatsMap.keySet());
- List<ColumnStatistics> csOlds = getMS().getPartitionColumnStatistics(catName, dbName,
- tableName, partitionNames, colNames);
+ List<ColumnStatistics> csOlds =
+ getMS().getPartitionColumnStatistics(
+ catName, dbName, tableName, partitionNames, colNames,
+ request.getTxnId(), request.getValidWriteIdList());
if (newStatsMap.values().size() != csOlds.size()) {
// some of the partitions miss stats.
LOG.debug("Some of the partitions miss stats.");
@@ -7412,7 +7445,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
mapToPart.put(partitionNames.get(index), partitions.get(index));
}
}
- Table t = getTable(catName, dbName, tableName);
+ Table t = getTable(catName, dbName, tableName,
+ request.getTxnId(), request.getValidWriteIdList());
for (Entry<String, ColumnStatistics> entry : newStatsMap.entrySet()) {
ColumnStatistics csNew = entry.getValue();
ColumnStatistics csOld = oldStatsMap.get(entry.getKey());
@@ -7439,7 +7473,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
private Table getTable(String catName, String dbName, String tableName)
throws MetaException, InvalidObjectException {
- Table t = getMS().getTable(catName, dbName, tableName);
+ return getTable(catName, dbName, tableName, -1, null);
+ }
+
+ private Table getTable(String catName, String dbName, String tableName,
+ long txnId, String writeIdList)
+ throws MetaException, InvalidObjectException {
+ Table t = getMS().getTable(catName, dbName, tableName, txnId, writeIdList);
if (t == null) {
throw new InvalidObjectException(TableName.getQualified(catName, dbName, tableName)
+ " table not found");
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 8990928..85c00bc 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -788,6 +788,50 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
}
@Override
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String dbName, String tableName, List<String> partNames, List<String> colNames,
+ long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName,
+ partNames, colNames, txnId, validWriteIdList);
+ }
+
+ @Override
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String catName, String dbName, String tableName, List<String> partNames,
+ List<String> colNames, long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames,
+ partNames);
+ rqst.setCatName(catName);
+ rqst.setTxnId(txnId);
+ rqst.setValidWriteIdList(validWriteIdList);
+ return client.get_partitions_statistics_req(rqst).getPartStats();
+ }
+
+ @Override
+ public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
+ List<String> partNames, long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames,
+ partNames, txnId, writeIdList);
+ }
+
+ @Override
+ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List<String> colNames,
+ List<String> partNames, long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ if (colNames.isEmpty() || partNames.isEmpty()) {
+ LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
+ return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate
+ }
+ PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames);
+ req.setCatName(catName);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(writeIdList);
+ return client.get_aggr_stats_for(req);
+ }
+
+ @Override
public List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
String sourceDb, String sourceTable, String destCat,
String destDb, String destTableName) throws TException {
@@ -1581,6 +1625,14 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
}
@Override
+ public Table getTable(String dbname, String name,
+ long txnId, String validWriteIdList)
+ throws MetaException, TException, NoSuchObjectException {
+ return getTable(getDefaultCatalog(conf), dbname, name,
+ txnId, validWriteIdList);
+ }
+
+ @Override
public Table getTable(String catName, String dbName, String tableName) throws TException {
GetTableRequest req = new GetTableRequest(dbName, tableName);
req.setCatName(catName);
@@ -1590,6 +1642,18 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
}
@Override
+ public Table getTable(String catName, String dbName, String tableName,
+ long txnId, String validWriteIdList) throws TException {
+ GetTableRequest req = new GetTableRequest(dbName, tableName);
+ req.setCatName(catName);
+ req.setCapabilities(version);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(validWriteIdList);
+ Table t = client.get_table_req(req).getTable();
+ return deepCopy(filterHook.filterTable(t));
+ }
+
+ @Override
public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
throws TException {
return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames);
@@ -1818,21 +1882,38 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
@Override
public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
throws TException {
- alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null);
+ alter_partitions(
+ getDefaultCatalog(conf), dbName, tblName, newParts, null, -1, null);
}
@Override
public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
EnvironmentContext environmentContext) throws TException {
- alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext);
+ alter_partitions(
+ getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, -1, null);
+ }
+
+ @Override
+ public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
+ throws InvalidOperationException, MetaException, TException {
+ alter_partitions(getDefaultCatalog(conf),
+ dbName, tblName, newParts, environmentContext, txnId, writeIdList);
}
@Override
public void alter_partitions(String catName, String dbName, String tblName,
List<Partition> newParts,
- EnvironmentContext environmentContext) throws TException {
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList) throws TException {
client.alter_partitions_with_environment_context(prependCatalogToDbName(catName, dbName, conf),
- tblName, newParts, environmentContext);
+ tblName, newParts, environmentContext, txnId, writeIdList);
}
@Override
@@ -1964,6 +2045,28 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
}
@Override
+ public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+ List<String> colNames,
+ long txnId,
+ String validWriteIdList) throws TException {
+ return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames,
+ txnId, validWriteIdList);
+ }
+
+ @Override
+ public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName,
+ String tableName,
+ List<String> colNames,
+ long txnId,
+ String validWriteIdList) throws TException {
+ TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames);
+ rqst.setCatName(catName);
+ rqst.setTxnId(txnId);
+ rqst.setValidWriteIdList(validWriteIdList);
+ return client.get_table_statistics_req(rqst).getTableStats();
+ }
+
+ @Override
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
String dbName, String tableName, List<String> partNames, List<String> colNames)
throws TException {
@@ -3316,4 +3419,5 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
req.setMaxCreateTime(maxCreateTime);
return client.get_runtime_stats(req);
}
+
}
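
For reference, a caller-side sketch of the stats getters added above: a hedged
example, assuming an already-connected IMetaStoreClient and the standalone-metastore
API on the classpath; the db/table/column/partition values and variable names are
illustrative, not from the patch.

import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

public class TxnStatsReadSketch {
  // Reads table- and partition-level column stats under the caller's snapshot.
  // Passing -1 and null instead takes the legacy non-transactional path,
  // mirroring the delegation in the patch.
  static void readStats(IMetaStoreClient msc, long txnId, String writeIds)
      throws Exception {
    List<ColumnStatisticsObj> tblStats =
        msc.getTableColumnStatistics("default", "t",
            Arrays.asList("id", "name"), txnId, writeIds);
    Map<String, List<ColumnStatisticsObj>> partStats =
        msc.getPartitionColumnStatistics("default", "t",
            Arrays.asList("p=1"), Arrays.asList("id"), txnId, writeIds);
    System.out.println(tblStats.size() + " tbl / " + partStats.size() + " part");
  }
}
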
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 29c98d1..3a65f77 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -90,6 +90,11 @@ public interface IHMSHandler extends ThriftHiveMetastore.Iface, Configurable {
Table get_table_core(final String catName, final String dbname, final String name)
throws MetaException, NoSuchObjectException;
+ /** As get_table_core(catName, dbname, name), but resolved under the given
+ * transactional snapshot (txnId plus serialized write-id list). */
+ Table get_table_core(final String catName, final String dbname,
+ final String name, final long txnId,
+ final String writeIdList)
+ throws MetaException, NoSuchObjectException;
+
/**
* Get a list of all transactional listeners.
* @return list of listeners.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index bc09076..c4cd8b4 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -712,6 +712,10 @@ public interface IMetaStoreClient {
Table getTable(String dbName, String tableName) throws MetaException,
TException, NoSuchObjectException;
+ /** As getTable(dbName, tableName), but resolved under the given transactional
+ * snapshot; pass -1 and null when no transaction is open. */
+ Table getTable(String dbName, String tableName,
+ long txnId, String validWriteIdList)
+ throws MetaException, TException, NoSuchObjectException;
+
/**
* Get a table object.
* @param catName catalog the table is in.
@@ -723,6 +727,8 @@ public interface IMetaStoreClient {
*/
Table getTable(String catName, String dbName, String tableName) throws MetaException, TException;
+ /** Catalog-aware variant of the snapshot-aware getTable above. */
+ Table getTable(String catName, String dbName, String tableName,
+ long txnId, String validWriteIdList) throws TException;
/**
* Get tables as objects (rather than just fetching their names). This is more expensive and
* should only be used if you actually need all the information about the tables.
@@ -2125,6 +2131,11 @@ public interface IMetaStoreClient {
EnvironmentContext environmentContext)
throws InvalidOperationException, MetaException, TException;
+ /** As alter_partitions(dbName, tblName, newParts, environmentContext), but
+ * also records the writing transaction's txnId and write-id list. */
+ void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
+ throws InvalidOperationException, MetaException, TException;
+
/**
* updates a list of partitions
* @param catName catalog name.
@@ -2144,7 +2155,7 @@ public interface IMetaStoreClient {
default void alter_partitions(String catName, String dbName, String tblName,
List<Partition> newParts)
throws InvalidOperationException, MetaException, TException {
- alter_partitions(catName, dbName, tblName, newParts, null);
+ alter_partitions(catName, dbName, tblName, newParts, null, -1, null);
}
/**
@@ -2165,7 +2176,8 @@ public interface IMetaStoreClient {
* if error in communicating with metastore server
*/
void alter_partitions(String catName, String dbName, String tblName, List<Partition> newParts,
- EnvironmentContext environmentContext)
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
throws InvalidOperationException, MetaException, TException;
/**
@@ -2346,6 +2358,12 @@ public interface IMetaStoreClient {
List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
List<String> colNames) throws NoSuchObjectException, MetaException, TException;
+ /** Snapshot-aware variant of getTableColumnStatistics(dbName, tableName,
+ * colNames); pass -1 and null when no transaction is open. */
+ List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+ List<String> colNames,
+ long txnId,
+ String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException;
+
/**
* Get the column statistics for a set of columns in a table. This should only be used for
* non-partitioned tables. For partitioned tables use
@@ -2363,6 +2381,11 @@ public interface IMetaStoreClient {
List<String> colNames)
throws NoSuchObjectException, MetaException, TException;
+ /** Catalog-aware variant of the snapshot-aware getTableColumnStatistics. */
+ List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName, String tableName,
+ List<String> colNames,
+ long txnId,
+ String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException;
/**
* Get the column statistics for a set of columns in a partition.
* @param dbName database name
@@ -2379,6 +2402,11 @@ public interface IMetaStoreClient {
String tableName, List<String> partNames, List<String> colNames)
throws NoSuchObjectException, MetaException, TException;
+ /** Snapshot-aware variant of getPartitionColumnStatistics(dbName, tableName,
+ * partNames, colNames). */
+ Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName,
+ String tableName, List<String> partNames, List<String> colNames,
+ long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException;
+
/**
* Get the column statistics for a set of columns in a partition.
* @param catName catalog name
@@ -2396,6 +2424,11 @@ public interface IMetaStoreClient {
String catName, String dbName, String tableName, List<String> partNames, List<String> colNames)
throws NoSuchObjectException, MetaException, TException;
+ /** Catalog-aware variant of the snapshot-aware getPartitionColumnStatistics. */
+ Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String catName, String dbName, String tableName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException;
/**
* Delete partition level column statistics given dbName, tableName, partName and colName, or
* all columns in a partition.
@@ -3237,6 +3270,10 @@ public interface IMetaStoreClient {
AggrStats getAggrColStatsFor(String dbName, String tblName,
List<String> colNames, List<String> partName) throws NoSuchObjectException, MetaException, TException;
+ /** Snapshot-aware variant of getAggrColStatsFor(dbName, tblName, colNames,
+ * partNames). */
+ AggrStats getAggrColStatsFor(String dbName, String tblName,
+ List<String> colNames, List<String> partNames,
+ long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException;
+
/**
* Get aggregated column stats for a set of partitions.
* @param catName catalog name
@@ -3253,6 +3290,10 @@ public interface IMetaStoreClient {
List<String> colNames, List<String> partNames)
throws NoSuchObjectException, MetaException, TException;
+ /** Catalog-aware variant of the snapshot-aware getAggrColStatsFor. */
+ AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
+ List<String> colNames, List<String> partNames,
+ long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException;
/**
* Set table or partition column statistics.
* @param request request object, contains all the table, partition, and statistics information
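
And the write side: a sketch of pushing rewritten partition metadata through the
widened alter_partitions, under the same assumptions as the reader sketch above
(a connected client; txnId and the serialized write-id list obtained elsewhere,
e.g. from the transaction manager).

import java.util.List;

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class TxnAlterSketch {
  // Sends the updated partitions along with the writer's txn context. The null
  // argument is the optional EnvironmentContext; (-1, null) in the last two
  // slots would select the legacy non-transactional path instead.
  static void commitPartitionChanges(IMetaStoreClient msc, List<Partition> parts,
      long txnId, String writeIds) throws Exception {
    msc.alter_partitions("default", "t", parts, null, txnId, writeIds);
  }
}
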
[10/13] hive git commit: HIVE-19532: 03 patch
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index e459bc2..aeb6b70 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -2334,14 +2334,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1202;
- ::apache::thrift::protocol::TType _etype1205;
- xfer += iprot->readListBegin(_etype1205, _size1202);
- this->success.resize(_size1202);
- uint32_t _i1206;
- for (_i1206 = 0; _i1206 < _size1202; ++_i1206)
+ uint32_t _size1211;
+ ::apache::thrift::protocol::TType _etype1214;
+ xfer += iprot->readListBegin(_etype1214, _size1211);
+ this->success.resize(_size1211);
+ uint32_t _i1215;
+ for (_i1215 = 0; _i1215 < _size1211; ++_i1215)
{
- xfer += iprot->readString(this->success[_i1206]);
+ xfer += iprot->readString(this->success[_i1215]);
}
xfer += iprot->readListEnd();
}
@@ -2380,10 +2380,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1207;
- for (_iter1207 = this->success.begin(); _iter1207 != this->success.end(); ++_iter1207)
+ std::vector<std::string> ::const_iterator _iter1216;
+ for (_iter1216 = this->success.begin(); _iter1216 != this->success.end(); ++_iter1216)
{
- xfer += oprot->writeString((*_iter1207));
+ xfer += oprot->writeString((*_iter1216));
}
xfer += oprot->writeListEnd();
}
@@ -2428,14 +2428,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1208;
- ::apache::thrift::protocol::TType _etype1211;
- xfer += iprot->readListBegin(_etype1211, _size1208);
- (*(this->success)).resize(_size1208);
- uint32_t _i1212;
- for (_i1212 = 0; _i1212 < _size1208; ++_i1212)
+ uint32_t _size1217;
+ ::apache::thrift::protocol::TType _etype1220;
+ xfer += iprot->readListBegin(_etype1220, _size1217);
+ (*(this->success)).resize(_size1217);
+ uint32_t _i1221;
+ for (_i1221 = 0; _i1221 < _size1217; ++_i1221)
{
- xfer += iprot->readString((*(this->success))[_i1212]);
+ xfer += iprot->readString((*(this->success))[_i1221]);
}
xfer += iprot->readListEnd();
}
@@ -2552,14 +2552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1213;
- ::apache::thrift::protocol::TType _etype1216;
- xfer += iprot->readListBegin(_etype1216, _size1213);
- this->success.resize(_size1213);
- uint32_t _i1217;
- for (_i1217 = 0; _i1217 < _size1213; ++_i1217)
+ uint32_t _size1222;
+ ::apache::thrift::protocol::TType _etype1225;
+ xfer += iprot->readListBegin(_etype1225, _size1222);
+ this->success.resize(_size1222);
+ uint32_t _i1226;
+ for (_i1226 = 0; _i1226 < _size1222; ++_i1226)
{
- xfer += iprot->readString(this->success[_i1217]);
+ xfer += iprot->readString(this->success[_i1226]);
}
xfer += iprot->readListEnd();
}
@@ -2598,10 +2598,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1218;
- for (_iter1218 = this->success.begin(); _iter1218 != this->success.end(); ++_iter1218)
+ std::vector<std::string> ::const_iterator _iter1227;
+ for (_iter1227 = this->success.begin(); _iter1227 != this->success.end(); ++_iter1227)
{
- xfer += oprot->writeString((*_iter1218));
+ xfer += oprot->writeString((*_iter1227));
}
xfer += oprot->writeListEnd();
}
@@ -2646,14 +2646,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1219;
- ::apache::thrift::protocol::TType _etype1222;
- xfer += iprot->readListBegin(_etype1222, _size1219);
- (*(this->success)).resize(_size1219);
- uint32_t _i1223;
- for (_i1223 = 0; _i1223 < _size1219; ++_i1223)
+ uint32_t _size1228;
+ ::apache::thrift::protocol::TType _etype1231;
+ xfer += iprot->readListBegin(_etype1231, _size1228);
+ (*(this->success)).resize(_size1228);
+ uint32_t _i1232;
+ for (_i1232 = 0; _i1232 < _size1228; ++_i1232)
{
- xfer += iprot->readString((*(this->success))[_i1223]);
+ xfer += iprot->readString((*(this->success))[_i1232]);
}
xfer += iprot->readListEnd();
}
@@ -3715,17 +3715,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size1224;
- ::apache::thrift::protocol::TType _ktype1225;
- ::apache::thrift::protocol::TType _vtype1226;
- xfer += iprot->readMapBegin(_ktype1225, _vtype1226, _size1224);
- uint32_t _i1228;
- for (_i1228 = 0; _i1228 < _size1224; ++_i1228)
+ uint32_t _size1233;
+ ::apache::thrift::protocol::TType _ktype1234;
+ ::apache::thrift::protocol::TType _vtype1235;
+ xfer += iprot->readMapBegin(_ktype1234, _vtype1235, _size1233);
+ uint32_t _i1237;
+ for (_i1237 = 0; _i1237 < _size1233; ++_i1237)
{
- std::string _key1229;
- xfer += iprot->readString(_key1229);
- Type& _val1230 = this->success[_key1229];
- xfer += _val1230.read(iprot);
+ std::string _key1238;
+ xfer += iprot->readString(_key1238);
+ Type& _val1239 = this->success[_key1238];
+ xfer += _val1239.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -3764,11 +3764,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::map<std::string, Type> ::const_iterator _iter1231;
- for (_iter1231 = this->success.begin(); _iter1231 != this->success.end(); ++_iter1231)
+ std::map<std::string, Type> ::const_iterator _iter1240;
+ for (_iter1240 = this->success.begin(); _iter1240 != this->success.end(); ++_iter1240)
{
- xfer += oprot->writeString(_iter1231->first);
- xfer += _iter1231->second.write(oprot);
+ xfer += oprot->writeString(_iter1240->first);
+ xfer += _iter1240->second.write(oprot);
}
xfer += oprot->writeMapEnd();
}
@@ -3813,17 +3813,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size1232;
- ::apache::thrift::protocol::TType _ktype1233;
- ::apache::thrift::protocol::TType _vtype1234;
- xfer += iprot->readMapBegin(_ktype1233, _vtype1234, _size1232);
- uint32_t _i1236;
- for (_i1236 = 0; _i1236 < _size1232; ++_i1236)
+ uint32_t _size1241;
+ ::apache::thrift::protocol::TType _ktype1242;
+ ::apache::thrift::protocol::TType _vtype1243;
+ xfer += iprot->readMapBegin(_ktype1242, _vtype1243, _size1241);
+ uint32_t _i1245;
+ for (_i1245 = 0; _i1245 < _size1241; ++_i1245)
{
- std::string _key1237;
- xfer += iprot->readString(_key1237);
- Type& _val1238 = (*(this->success))[_key1237];
- xfer += _val1238.read(iprot);
+ std::string _key1246;
+ xfer += iprot->readString(_key1246);
+ Type& _val1247 = (*(this->success))[_key1246];
+ xfer += _val1247.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -3977,14 +3977,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1239;
- ::apache::thrift::protocol::TType _etype1242;
- xfer += iprot->readListBegin(_etype1242, _size1239);
- this->success.resize(_size1239);
- uint32_t _i1243;
- for (_i1243 = 0; _i1243 < _size1239; ++_i1243)
+ uint32_t _size1248;
+ ::apache::thrift::protocol::TType _etype1251;
+ xfer += iprot->readListBegin(_etype1251, _size1248);
+ this->success.resize(_size1248);
+ uint32_t _i1252;
+ for (_i1252 = 0; _i1252 < _size1248; ++_i1252)
{
- xfer += this->success[_i1243].read(iprot);
+ xfer += this->success[_i1252].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4039,10 +4039,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1244;
- for (_iter1244 = this->success.begin(); _iter1244 != this->success.end(); ++_iter1244)
+ std::vector<FieldSchema> ::const_iterator _iter1253;
+ for (_iter1253 = this->success.begin(); _iter1253 != this->success.end(); ++_iter1253)
{
- xfer += (*_iter1244).write(oprot);
+ xfer += (*_iter1253).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4095,14 +4095,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1245;
- ::apache::thrift::protocol::TType _etype1248;
- xfer += iprot->readListBegin(_etype1248, _size1245);
- (*(this->success)).resize(_size1245);
- uint32_t _i1249;
- for (_i1249 = 0; _i1249 < _size1245; ++_i1249)
+ uint32_t _size1254;
+ ::apache::thrift::protocol::TType _etype1257;
+ xfer += iprot->readListBegin(_etype1257, _size1254);
+ (*(this->success)).resize(_size1254);
+ uint32_t _i1258;
+ for (_i1258 = 0; _i1258 < _size1254; ++_i1258)
{
- xfer += (*(this->success))[_i1249].read(iprot);
+ xfer += (*(this->success))[_i1258].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4288,14 +4288,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1250;
- ::apache::thrift::protocol::TType _etype1253;
- xfer += iprot->readListBegin(_etype1253, _size1250);
- this->success.resize(_size1250);
- uint32_t _i1254;
- for (_i1254 = 0; _i1254 < _size1250; ++_i1254)
+ uint32_t _size1259;
+ ::apache::thrift::protocol::TType _etype1262;
+ xfer += iprot->readListBegin(_etype1262, _size1259);
+ this->success.resize(_size1259);
+ uint32_t _i1263;
+ for (_i1263 = 0; _i1263 < _size1259; ++_i1263)
{
- xfer += this->success[_i1254].read(iprot);
+ xfer += this->success[_i1263].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4350,10 +4350,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1255;
- for (_iter1255 = this->success.begin(); _iter1255 != this->success.end(); ++_iter1255)
+ std::vector<FieldSchema> ::const_iterator _iter1264;
+ for (_iter1264 = this->success.begin(); _iter1264 != this->success.end(); ++_iter1264)
{
- xfer += (*_iter1255).write(oprot);
+ xfer += (*_iter1264).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4406,14 +4406,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1256;
- ::apache::thrift::protocol::TType _etype1259;
- xfer += iprot->readListBegin(_etype1259, _size1256);
- (*(this->success)).resize(_size1256);
- uint32_t _i1260;
- for (_i1260 = 0; _i1260 < _size1256; ++_i1260)
+ uint32_t _size1265;
+ ::apache::thrift::protocol::TType _etype1268;
+ xfer += iprot->readListBegin(_etype1268, _size1265);
+ (*(this->success)).resize(_size1265);
+ uint32_t _i1269;
+ for (_i1269 = 0; _i1269 < _size1265; ++_i1269)
{
- xfer += (*(this->success))[_i1260].read(iprot);
+ xfer += (*(this->success))[_i1269].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4583,14 +4583,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1261;
- ::apache::thrift::protocol::TType _etype1264;
- xfer += iprot->readListBegin(_etype1264, _size1261);
- this->success.resize(_size1261);
- uint32_t _i1265;
- for (_i1265 = 0; _i1265 < _size1261; ++_i1265)
+ uint32_t _size1270;
+ ::apache::thrift::protocol::TType _etype1273;
+ xfer += iprot->readListBegin(_etype1273, _size1270);
+ this->success.resize(_size1270);
+ uint32_t _i1274;
+ for (_i1274 = 0; _i1274 < _size1270; ++_i1274)
{
- xfer += this->success[_i1265].read(iprot);
+ xfer += this->success[_i1274].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4645,10 +4645,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1266;
- for (_iter1266 = this->success.begin(); _iter1266 != this->success.end(); ++_iter1266)
+ std::vector<FieldSchema> ::const_iterator _iter1275;
+ for (_iter1275 = this->success.begin(); _iter1275 != this->success.end(); ++_iter1275)
{
- xfer += (*_iter1266).write(oprot);
+ xfer += (*_iter1275).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4701,14 +4701,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1267;
- ::apache::thrift::protocol::TType _etype1270;
- xfer += iprot->readListBegin(_etype1270, _size1267);
- (*(this->success)).resize(_size1267);
- uint32_t _i1271;
- for (_i1271 = 0; _i1271 < _size1267; ++_i1271)
+ uint32_t _size1276;
+ ::apache::thrift::protocol::TType _etype1279;
+ xfer += iprot->readListBegin(_etype1279, _size1276);
+ (*(this->success)).resize(_size1276);
+ uint32_t _i1280;
+ for (_i1280 = 0; _i1280 < _size1276; ++_i1280)
{
- xfer += (*(this->success))[_i1271].read(iprot);
+ xfer += (*(this->success))[_i1280].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4894,14 +4894,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1272;
- ::apache::thrift::protocol::TType _etype1275;
- xfer += iprot->readListBegin(_etype1275, _size1272);
- this->success.resize(_size1272);
- uint32_t _i1276;
- for (_i1276 = 0; _i1276 < _size1272; ++_i1276)
+ uint32_t _size1281;
+ ::apache::thrift::protocol::TType _etype1284;
+ xfer += iprot->readListBegin(_etype1284, _size1281);
+ this->success.resize(_size1281);
+ uint32_t _i1285;
+ for (_i1285 = 0; _i1285 < _size1281; ++_i1285)
{
- xfer += this->success[_i1276].read(iprot);
+ xfer += this->success[_i1285].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4956,10 +4956,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1277;
- for (_iter1277 = this->success.begin(); _iter1277 != this->success.end(); ++_iter1277)
+ std::vector<FieldSchema> ::const_iterator _iter1286;
+ for (_iter1286 = this->success.begin(); _iter1286 != this->success.end(); ++_iter1286)
{
- xfer += (*_iter1277).write(oprot);
+ xfer += (*_iter1286).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5012,14 +5012,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1278;
- ::apache::thrift::protocol::TType _etype1281;
- xfer += iprot->readListBegin(_etype1281, _size1278);
- (*(this->success)).resize(_size1278);
- uint32_t _i1282;
- for (_i1282 = 0; _i1282 < _size1278; ++_i1282)
+ uint32_t _size1287;
+ ::apache::thrift::protocol::TType _etype1290;
+ xfer += iprot->readListBegin(_etype1290, _size1287);
+ (*(this->success)).resize(_size1287);
+ uint32_t _i1291;
+ for (_i1291 = 0; _i1291 < _size1287; ++_i1291)
{
- xfer += (*(this->success))[_i1282].read(iprot);
+ xfer += (*(this->success))[_i1291].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5612,14 +5612,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeys.clear();
- uint32_t _size1283;
- ::apache::thrift::protocol::TType _etype1286;
- xfer += iprot->readListBegin(_etype1286, _size1283);
- this->primaryKeys.resize(_size1283);
- uint32_t _i1287;
- for (_i1287 = 0; _i1287 < _size1283; ++_i1287)
+ uint32_t _size1292;
+ ::apache::thrift::protocol::TType _etype1295;
+ xfer += iprot->readListBegin(_etype1295, _size1292);
+ this->primaryKeys.resize(_size1292);
+ uint32_t _i1296;
+ for (_i1296 = 0; _i1296 < _size1292; ++_i1296)
{
- xfer += this->primaryKeys[_i1287].read(iprot);
+ xfer += this->primaryKeys[_i1296].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5632,14 +5632,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeys.clear();
- uint32_t _size1288;
- ::apache::thrift::protocol::TType _etype1291;
- xfer += iprot->readListBegin(_etype1291, _size1288);
- this->foreignKeys.resize(_size1288);
- uint32_t _i1292;
- for (_i1292 = 0; _i1292 < _size1288; ++_i1292)
+ uint32_t _size1297;
+ ::apache::thrift::protocol::TType _etype1300;
+ xfer += iprot->readListBegin(_etype1300, _size1297);
+ this->foreignKeys.resize(_size1297);
+ uint32_t _i1301;
+ for (_i1301 = 0; _i1301 < _size1297; ++_i1301)
{
- xfer += this->foreignKeys[_i1292].read(iprot);
+ xfer += this->foreignKeys[_i1301].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5652,14 +5652,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->uniqueConstraints.clear();
- uint32_t _size1293;
- ::apache::thrift::protocol::TType _etype1296;
- xfer += iprot->readListBegin(_etype1296, _size1293);
- this->uniqueConstraints.resize(_size1293);
- uint32_t _i1297;
- for (_i1297 = 0; _i1297 < _size1293; ++_i1297)
+ uint32_t _size1302;
+ ::apache::thrift::protocol::TType _etype1305;
+ xfer += iprot->readListBegin(_etype1305, _size1302);
+ this->uniqueConstraints.resize(_size1302);
+ uint32_t _i1306;
+ for (_i1306 = 0; _i1306 < _size1302; ++_i1306)
{
- xfer += this->uniqueConstraints[_i1297].read(iprot);
+ xfer += this->uniqueConstraints[_i1306].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5672,14 +5672,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->notNullConstraints.clear();
- uint32_t _size1298;
- ::apache::thrift::protocol::TType _etype1301;
- xfer += iprot->readListBegin(_etype1301, _size1298);
- this->notNullConstraints.resize(_size1298);
- uint32_t _i1302;
- for (_i1302 = 0; _i1302 < _size1298; ++_i1302)
+ uint32_t _size1307;
+ ::apache::thrift::protocol::TType _etype1310;
+ xfer += iprot->readListBegin(_etype1310, _size1307);
+ this->notNullConstraints.resize(_size1307);
+ uint32_t _i1311;
+ for (_i1311 = 0; _i1311 < _size1307; ++_i1311)
{
- xfer += this->notNullConstraints[_i1302].read(iprot);
+ xfer += this->notNullConstraints[_i1311].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5692,14 +5692,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->defaultConstraints.clear();
- uint32_t _size1303;
- ::apache::thrift::protocol::TType _etype1306;
- xfer += iprot->readListBegin(_etype1306, _size1303);
- this->defaultConstraints.resize(_size1303);
- uint32_t _i1307;
- for (_i1307 = 0; _i1307 < _size1303; ++_i1307)
+ uint32_t _size1312;
+ ::apache::thrift::protocol::TType _etype1315;
+ xfer += iprot->readListBegin(_etype1315, _size1312);
+ this->defaultConstraints.resize(_size1312);
+ uint32_t _i1316;
+ for (_i1316 = 0; _i1316 < _size1312; ++_i1316)
{
- xfer += this->defaultConstraints[_i1307].read(iprot);
+ xfer += this->defaultConstraints[_i1316].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5712,14 +5712,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->checkConstraints.clear();
- uint32_t _size1308;
- ::apache::thrift::protocol::TType _etype1311;
- xfer += iprot->readListBegin(_etype1311, _size1308);
- this->checkConstraints.resize(_size1308);
- uint32_t _i1312;
- for (_i1312 = 0; _i1312 < _size1308; ++_i1312)
+ uint32_t _size1317;
+ ::apache::thrift::protocol::TType _etype1320;
+ xfer += iprot->readListBegin(_etype1320, _size1317);
+ this->checkConstraints.resize(_size1317);
+ uint32_t _i1321;
+ for (_i1321 = 0; _i1321 < _size1317; ++_i1321)
{
- xfer += this->checkConstraints[_i1312].read(iprot);
+ xfer += this->checkConstraints[_i1321].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5752,10 +5752,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter1313;
- for (_iter1313 = this->primaryKeys.begin(); _iter1313 != this->primaryKeys.end(); ++_iter1313)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter1322;
+ for (_iter1322 = this->primaryKeys.begin(); _iter1322 != this->primaryKeys.end(); ++_iter1322)
{
- xfer += (*_iter1313).write(oprot);
+ xfer += (*_iter1322).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5764,10 +5764,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter1314;
- for (_iter1314 = this->foreignKeys.begin(); _iter1314 != this->foreignKeys.end(); ++_iter1314)
+ std::vector<SQLForeignKey> ::const_iterator _iter1323;
+ for (_iter1323 = this->foreignKeys.begin(); _iter1323 != this->foreignKeys.end(); ++_iter1323)
{
- xfer += (*_iter1314).write(oprot);
+ xfer += (*_iter1323).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5776,10 +5776,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter1315;
- for (_iter1315 = this->uniqueConstraints.begin(); _iter1315 != this->uniqueConstraints.end(); ++_iter1315)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter1324;
+ for (_iter1324 = this->uniqueConstraints.begin(); _iter1324 != this->uniqueConstraints.end(); ++_iter1324)
{
- xfer += (*_iter1315).write(oprot);
+ xfer += (*_iter1324).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5788,10 +5788,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter1316;
- for (_iter1316 = this->notNullConstraints.begin(); _iter1316 != this->notNullConstraints.end(); ++_iter1316)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter1325;
+ for (_iter1325 = this->notNullConstraints.begin(); _iter1325 != this->notNullConstraints.end(); ++_iter1325)
{
- xfer += (*_iter1316).write(oprot);
+ xfer += (*_iter1325).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5800,10 +5800,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->defaultConstraints.size()));
- std::vector<SQLDefaultConstraint> ::const_iterator _iter1317;
- for (_iter1317 = this->defaultConstraints.begin(); _iter1317 != this->defaultConstraints.end(); ++_iter1317)
+ std::vector<SQLDefaultConstraint> ::const_iterator _iter1326;
+ for (_iter1326 = this->defaultConstraints.begin(); _iter1326 != this->defaultConstraints.end(); ++_iter1326)
{
- xfer += (*_iter1317).write(oprot);
+ xfer += (*_iter1326).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5812,10 +5812,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->checkConstraints.size()));
- std::vector<SQLCheckConstraint> ::const_iterator _iter1318;
- for (_iter1318 = this->checkConstraints.begin(); _iter1318 != this->checkConstraints.end(); ++_iter1318)
+ std::vector<SQLCheckConstraint> ::const_iterator _iter1327;
+ for (_iter1327 = this->checkConstraints.begin(); _iter1327 != this->checkConstraints.end(); ++_iter1327)
{
- xfer += (*_iter1318).write(oprot);
+ xfer += (*_iter1327).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5843,10 +5843,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter1319;
- for (_iter1319 = (*(this->primaryKeys)).begin(); _iter1319 != (*(this->primaryKeys)).end(); ++_iter1319)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter1328;
+ for (_iter1328 = (*(this->primaryKeys)).begin(); _iter1328 != (*(this->primaryKeys)).end(); ++_iter1328)
{
- xfer += (*_iter1319).write(oprot);
+ xfer += (*_iter1328).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5855,10 +5855,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
- std::vector<SQLForeignKey> ::const_iterator _iter1320;
- for (_iter1320 = (*(this->foreignKeys)).begin(); _iter1320 != (*(this->foreignKeys)).end(); ++_iter1320)
+ std::vector<SQLForeignKey> ::const_iterator _iter1329;
+ for (_iter1329 = (*(this->foreignKeys)).begin(); _iter1329 != (*(this->foreignKeys)).end(); ++_iter1329)
{
- xfer += (*_iter1320).write(oprot);
+ xfer += (*_iter1329).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5867,10 +5867,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->uniqueConstraints)).size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter1321;
- for (_iter1321 = (*(this->uniqueConstraints)).begin(); _iter1321 != (*(this->uniqueConstraints)).end(); ++_iter1321)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter1330;
+ for (_iter1330 = (*(this->uniqueConstraints)).begin(); _iter1330 != (*(this->uniqueConstraints)).end(); ++_iter1330)
{
- xfer += (*_iter1321).write(oprot);
+ xfer += (*_iter1330).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5879,10 +5879,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->notNullConstraints)).size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter1322;
- for (_iter1322 = (*(this->notNullConstraints)).begin(); _iter1322 != (*(this->notNullConstraints)).end(); ++_iter1322)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter1331;
+ for (_iter1331 = (*(this->notNullConstraints)).begin(); _iter1331 != (*(this->notNullConstraints)).end(); ++_iter1331)
{
- xfer += (*_iter1322).write(oprot);
+ xfer += (*_iter1331).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5891,10 +5891,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->defaultConstraints)).size()));
- std::vector<SQLDefaultConstraint> ::const_iterator _iter1323;
- for (_iter1323 = (*(this->defaultConstraints)).begin(); _iter1323 != (*(this->defaultConstraints)).end(); ++_iter1323)
+ std::vector<SQLDefaultConstraint> ::const_iterator _iter1332;
+ for (_iter1332 = (*(this->defaultConstraints)).begin(); _iter1332 != (*(this->defaultConstraints)).end(); ++_iter1332)
{
- xfer += (*_iter1323).write(oprot);
+ xfer += (*_iter1332).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5903,10 +5903,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->checkConstraints)).size()));
- std::vector<SQLCheckConstraint> ::const_iterator _iter1324;
- for (_iter1324 = (*(this->checkConstraints)).begin(); _iter1324 != (*(this->checkConstraints)).end(); ++_iter1324)
+ std::vector<SQLCheckConstraint> ::const_iterator _iter1333;
+ for (_iter1333 = (*(this->checkConstraints)).begin(); _iter1333 != (*(this->checkConstraints)).end(); ++_iter1333)
{
- xfer += (*_iter1324).write(oprot);
+ xfer += (*_iter1333).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8074,14 +8074,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partNames.clear();
- uint32_t _size1325;
- ::apache::thrift::protocol::TType _etype1328;
- xfer += iprot->readListBegin(_etype1328, _size1325);
- this->partNames.resize(_size1325);
- uint32_t _i1329;
- for (_i1329 = 0; _i1329 < _size1325; ++_i1329)
+ uint32_t _size1334;
+ ::apache::thrift::protocol::TType _etype1337;
+ xfer += iprot->readListBegin(_etype1337, _size1334);
+ this->partNames.resize(_size1334);
+ uint32_t _i1338;
+ for (_i1338 = 0; _i1338 < _size1334; ++_i1338)
{
- xfer += iprot->readString(this->partNames[_i1329]);
+ xfer += iprot->readString(this->partNames[_i1338]);
}
xfer += iprot->readListEnd();
}
@@ -8118,10 +8118,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
- std::vector<std::string> ::const_iterator _iter1330;
- for (_iter1330 = this->partNames.begin(); _iter1330 != this->partNames.end(); ++_iter1330)
+ std::vector<std::string> ::const_iterator _iter1339;
+ for (_iter1339 = this->partNames.begin(); _iter1339 != this->partNames.end(); ++_iter1339)
{
- xfer += oprot->writeString((*_iter1330));
+ xfer += oprot->writeString((*_iter1339));
}
xfer += oprot->writeListEnd();
}
@@ -8153,10 +8153,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
- std::vector<std::string> ::const_iterator _iter1331;
- for (_iter1331 = (*(this->partNames)).begin(); _iter1331 != (*(this->partNames)).end(); ++_iter1331)
+ std::vector<std::string> ::const_iterator _iter1340;
+ for (_iter1340 = (*(this->partNames)).begin(); _iter1340 != (*(this->partNames)).end(); ++_iter1340)
{
- xfer += oprot->writeString((*_iter1331));
+ xfer += oprot->writeString((*_iter1340));
}
xfer += oprot->writeListEnd();
}
@@ -8400,14 +8400,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1332;
- ::apache::thrift::protocol::TType _etype1335;
- xfer += iprot->readListBegin(_etype1335, _size1332);
- this->success.resize(_size1332);
- uint32_t _i1336;
- for (_i1336 = 0; _i1336 < _size1332; ++_i1336)
+ uint32_t _size1341;
+ ::apache::thrift::protocol::TType _etype1344;
+ xfer += iprot->readListBegin(_etype1344, _size1341);
+ this->success.resize(_size1341);
+ uint32_t _i1345;
+ for (_i1345 = 0; _i1345 < _size1341; ++_i1345)
{
- xfer += iprot->readString(this->success[_i1336]);
+ xfer += iprot->readString(this->success[_i1345]);
}
xfer += iprot->readListEnd();
}
@@ -8446,10 +8446,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1337;
- for (_iter1337 = this->success.begin(); _iter1337 != this->success.end(); ++_iter1337)
+ std::vector<std::string> ::const_iterator _iter1346;
+ for (_iter1346 = this->success.begin(); _iter1346 != this->success.end(); ++_iter1346)
{
- xfer += oprot->writeString((*_iter1337));
+ xfer += oprot->writeString((*_iter1346));
}
xfer += oprot->writeListEnd();
}
@@ -8494,14 +8494,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1338;
- ::apache::thrift::protocol::TType _etype1341;
- xfer += iprot->readListBegin(_etype1341, _size1338);
- (*(this->success)).resize(_size1338);
- uint32_t _i1342;
- for (_i1342 = 0; _i1342 < _size1338; ++_i1342)
+ uint32_t _size1347;
+ ::apache::thrift::protocol::TType _etype1350;
+ xfer += iprot->readListBegin(_etype1350, _size1347);
+ (*(this->success)).resize(_size1347);
+ uint32_t _i1351;
+ for (_i1351 = 0; _i1351 < _size1347; ++_i1351)
{
- xfer += iprot->readString((*(this->success))[_i1342]);
+ xfer += iprot->readString((*(this->success))[_i1351]);
}
xfer += iprot->readListEnd();
}
@@ -8671,14 +8671,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1343;
- ::apache::thrift::protocol::TType _etype1346;
- xfer += iprot->readListBegin(_etype1346, _size1343);
- this->success.resize(_size1343);
- uint32_t _i1347;
- for (_i1347 = 0; _i1347 < _size1343; ++_i1347)
+ uint32_t _size1352;
+ ::apache::thrift::protocol::TType _etype1355;
+ xfer += iprot->readListBegin(_etype1355, _size1352);
+ this->success.resize(_size1352);
+ uint32_t _i1356;
+ for (_i1356 = 0; _i1356 < _size1352; ++_i1356)
{
- xfer += iprot->readString(this->success[_i1347]);
+ xfer += iprot->readString(this->success[_i1356]);
}
xfer += iprot->readListEnd();
}
@@ -8717,10 +8717,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1348;
- for (_iter1348 = this->success.begin(); _iter1348 != this->success.end(); ++_iter1348)
+ std::vector<std::string> ::const_iterator _iter1357;
+ for (_iter1357 = this->success.begin(); _iter1357 != this->success.end(); ++_iter1357)
{
- xfer += oprot->writeString((*_iter1348));
+ xfer += oprot->writeString((*_iter1357));
}
xfer += oprot->writeListEnd();
}
@@ -8765,14 +8765,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1349;
- ::apache::thrift::protocol::TType _etype1352;
- xfer += iprot->readListBegin(_etype1352, _size1349);
- (*(this->success)).resize(_size1349);
- uint32_t _i1353;
- for (_i1353 = 0; _i1353 < _size1349; ++_i1353)
+ uint32_t _size1358;
+ ::apache::thrift::protocol::TType _etype1361;
+ xfer += iprot->readListBegin(_etype1361, _size1358);
+ (*(this->success)).resize(_size1358);
+ uint32_t _i1362;
+ for (_i1362 = 0; _i1362 < _size1358; ++_i1362)
{
- xfer += iprot->readString((*(this->success))[_i1353]);
+ xfer += iprot->readString((*(this->success))[_i1362]);
}
xfer += iprot->readListEnd();
}
@@ -8910,14 +8910,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1354;
- ::apache::thrift::protocol::TType _etype1357;
- xfer += iprot->readListBegin(_etype1357, _size1354);
- this->success.resize(_size1354);
- uint32_t _i1358;
- for (_i1358 = 0; _i1358 < _size1354; ++_i1358)
+ uint32_t _size1363;
+ ::apache::thrift::protocol::TType _etype1366;
+ xfer += iprot->readListBegin(_etype1366, _size1363);
+ this->success.resize(_size1363);
+ uint32_t _i1367;
+ for (_i1367 = 0; _i1367 < _size1363; ++_i1367)
{
- xfer += iprot->readString(this->success[_i1358]);
+ xfer += iprot->readString(this->success[_i1367]);
}
xfer += iprot->readListEnd();
}
@@ -8956,10 +8956,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write(
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1359;
- for (_iter1359 = this->success.begin(); _iter1359 != this->success.end(); ++_iter1359)
+ std::vector<std::string> ::const_iterator _iter1368;
+ for (_iter1368 = this->success.begin(); _iter1368 != this->success.end(); ++_iter1368)
{
- xfer += oprot->writeString((*_iter1359));
+ xfer += oprot->writeString((*_iter1368));
}
xfer += oprot->writeListEnd();
}
@@ -9004,14 +9004,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read(
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1360;
- ::apache::thrift::protocol::TType _etype1363;
- xfer += iprot->readListBegin(_etype1363, _size1360);
- (*(this->success)).resize(_size1360);
- uint32_t _i1364;
- for (_i1364 = 0; _i1364 < _size1360; ++_i1364)
+ uint32_t _size1369;
+ ::apache::thrift::protocol::TType _etype1372;
+ xfer += iprot->readListBegin(_etype1372, _size1369);
+ (*(this->success)).resize(_size1369);
+ uint32_t _i1373;
+ for (_i1373 = 0; _i1373 < _size1369; ++_i1373)
{
- xfer += iprot->readString((*(this->success))[_i1364]);
+ xfer += iprot->readString((*(this->success))[_i1373]);
}
xfer += iprot->readListEnd();
}
@@ -9086,14 +9086,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_types.clear();
- uint32_t _size1365;
- ::apache::thrift::protocol::TType _etype1368;
- xfer += iprot->readListBegin(_etype1368, _size1365);
- this->tbl_types.resize(_size1365);
- uint32_t _i1369;
- for (_i1369 = 0; _i1369 < _size1365; ++_i1369)
+ uint32_t _size1374;
+ ::apache::thrift::protocol::TType _etype1377;
+ xfer += iprot->readListBegin(_etype1377, _size1374);
+ this->tbl_types.resize(_size1374);
+ uint32_t _i1378;
+ for (_i1378 = 0; _i1378 < _size1374; ++_i1378)
{
- xfer += iprot->readString(this->tbl_types[_i1369]);
+ xfer += iprot->readString(this->tbl_types[_i1378]);
}
xfer += iprot->readListEnd();
}
@@ -9130,10 +9130,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
- std::vector<std::string> ::const_iterator _iter1370;
- for (_iter1370 = this->tbl_types.begin(); _iter1370 != this->tbl_types.end(); ++_iter1370)
+ std::vector<std::string> ::const_iterator _iter1379;
+ for (_iter1379 = this->tbl_types.begin(); _iter1379 != this->tbl_types.end(); ++_iter1379)
{
- xfer += oprot->writeString((*_iter1370));
+ xfer += oprot->writeString((*_iter1379));
}
xfer += oprot->writeListEnd();
}
@@ -9165,10 +9165,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
- std::vector<std::string> ::const_iterator _iter1371;
- for (_iter1371 = (*(this->tbl_types)).begin(); _iter1371 != (*(this->tbl_types)).end(); ++_iter1371)
+ std::vector<std::string> ::const_iterator _iter1380;
+ for (_iter1380 = (*(this->tbl_types)).begin(); _iter1380 != (*(this->tbl_types)).end(); ++_iter1380)
{
- xfer += oprot->writeString((*_iter1371));
+ xfer += oprot->writeString((*_iter1380));
}
xfer += oprot->writeListEnd();
}
@@ -9209,14 +9209,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1372;
- ::apache::thrift::protocol::TType _etype1375;
- xfer += iprot->readListBegin(_etype1375, _size1372);
- this->success.resize(_size1372);
- uint32_t _i1376;
- for (_i1376 = 0; _i1376 < _size1372; ++_i1376)
+ uint32_t _size1381;
+ ::apache::thrift::protocol::TType _etype1384;
+ xfer += iprot->readListBegin(_etype1384, _size1381);
+ this->success.resize(_size1381);
+ uint32_t _i1385;
+ for (_i1385 = 0; _i1385 < _size1381; ++_i1385)
{
- xfer += this->success[_i1376].read(iprot);
+ xfer += this->success[_i1385].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9255,10 +9255,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<TableMeta> ::const_iterator _iter1377;
- for (_iter1377 = this->success.begin(); _iter1377 != this->success.end(); ++_iter1377)
+ std::vector<TableMeta> ::const_iterator _iter1386;
+ for (_iter1386 = this->success.begin(); _iter1386 != this->success.end(); ++_iter1386)
{
- xfer += (*_iter1377).write(oprot);
+ xfer += (*_iter1386).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9303,14 +9303,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1378;
- ::apache::thrift::protocol::TType _etype1381;
- xfer += iprot->readListBegin(_etype1381, _size1378);
- (*(this->success)).resize(_size1378);
- uint32_t _i1382;
- for (_i1382 = 0; _i1382 < _size1378; ++_i1382)
+ uint32_t _size1387;
+ ::apache::thrift::protocol::TType _etype1390;
+ xfer += iprot->readListBegin(_etype1390, _size1387);
+ (*(this->success)).resize(_size1387);
+ uint32_t _i1391;
+ for (_i1391 = 0; _i1391 < _size1387; ++_i1391)
{
- xfer += (*(this->success))[_i1382].read(iprot);
+ xfer += (*(this->success))[_i1391].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9448,14 +9448,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1383;
- ::apache::thrift::protocol::TType _etype1386;
- xfer += iprot->readListBegin(_etype1386, _size1383);
- this->success.resize(_size1383);
- uint32_t _i1387;
- for (_i1387 = 0; _i1387 < _size1383; ++_i1387)
+ uint32_t _size1392;
+ ::apache::thrift::protocol::TType _etype1395;
+ xfer += iprot->readListBegin(_etype1395, _size1392);
+ this->success.resize(_size1392);
+ uint32_t _i1396;
+ for (_i1396 = 0; _i1396 < _size1392; ++_i1396)
{
- xfer += iprot->readString(this->success[_i1387]);
+ xfer += iprot->readString(this->success[_i1396]);
}
xfer += iprot->readListEnd();
}
@@ -9494,10 +9494,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1388;
- for (_iter1388 = this->success.begin(); _iter1388 != this->success.end(); ++_iter1388)
+ std::vector<std::string> ::const_iterator _iter1397;
+ for (_iter1397 = this->success.begin(); _iter1397 != this->success.end(); ++_iter1397)
{
- xfer += oprot->writeString((*_iter1388));
+ xfer += oprot->writeString((*_iter1397));
}
xfer += oprot->writeListEnd();
}
@@ -9542,14 +9542,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1389;
- ::apache::thrift::protocol::TType _etype1392;
- xfer += iprot->readListBegin(_etype1392, _size1389);
- (*(this->success)).resize(_size1389);
- uint32_t _i1393;
- for (_i1393 = 0; _i1393 < _size1389; ++_i1393)
+ uint32_t _size1398;
+ ::apache::thrift::protocol::TType _etype1401;
+ xfer += iprot->readListBegin(_etype1401, _size1398);
+ (*(this->success)).resize(_size1398);
+ uint32_t _i1402;
+ for (_i1402 = 0; _i1402 < _size1398; ++_i1402)
{
- xfer += iprot->readString((*(this->success))[_i1393]);
+ xfer += iprot->readString((*(this->success))[_i1402]);
}
xfer += iprot->readListEnd();
}
@@ -9859,14 +9859,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_names.clear();
- uint32_t _size1394;
- ::apache::thrift::protocol::TType _etype1397;
- xfer += iprot->readListBegin(_etype1397, _size1394);
- this->tbl_names.resize(_size1394);
- uint32_t _i1398;
- for (_i1398 = 0; _i1398 < _size1394; ++_i1398)
+ uint32_t _size1403;
+ ::apache::thrift::protocol::TType _etype1406;
+ xfer += iprot->readListBegin(_etype1406, _size1403);
+ this->tbl_names.resize(_size1403);
+ uint32_t _i1407;
+ for (_i1407 = 0; _i1407 < _size1403; ++_i1407)
{
- xfer += iprot->readString(this->tbl_names[_i1398]);
+ xfer += iprot->readString(this->tbl_names[_i1407]);
}
xfer += iprot->readListEnd();
}
@@ -9899,10 +9899,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
- std::vector<std::string> ::const_iterator _iter1399;
- for (_iter1399 = this->tbl_names.begin(); _iter1399 != this->tbl_names.end(); ++_iter1399)
+ std::vector<std::string> ::const_iterator _iter1408;
+ for (_iter1408 = this->tbl_names.begin(); _iter1408 != this->tbl_names.end(); ++_iter1408)
{
- xfer += oprot->writeString((*_iter1399));
+ xfer += oprot->writeString((*_iter1408));
}
xfer += oprot->writeListEnd();
}
@@ -9930,10 +9930,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
- std::vector<std::string> ::const_iterator _iter1400;
- for (_iter1400 = (*(this->tbl_names)).begin(); _iter1400 != (*(this->tbl_names)).end(); ++_iter1400)
+ std::vector<std::string> ::const_iterator _iter1409;
+ for (_iter1409 = (*(this->tbl_names)).begin(); _iter1409 != (*(this->tbl_names)).end(); ++_iter1409)
{
- xfer += oprot->writeString((*_iter1400));
+ xfer += oprot->writeString((*_iter1409));
}
xfer += oprot->writeListEnd();
}
@@ -9974,14 +9974,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1401;
- ::apache::thrift::protocol::TType _etype1404;
- xfer += iprot->readListBegin(_etype1404, _size1401);
- this->success.resize(_size1401);
- uint32_t _i1405;
- for (_i1405 = 0; _i1405 < _size1401; ++_i1405)
+ uint32_t _size1410;
+ ::apache::thrift::protocol::TType _etype1413;
+ xfer += iprot->readListBegin(_etype1413, _size1410);
+ this->success.resize(_size1410);
+ uint32_t _i1414;
+ for (_i1414 = 0; _i1414 < _size1410; ++_i1414)
{
- xfer += this->success[_i1405].read(iprot);
+ xfer += this->success[_i1414].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10012,10 +10012,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<Table> ::const_iterator _iter1406;
- for (_iter1406 = this->success.begin(); _iter1406 != this->success.end(); ++_iter1406)
+ std::vector<Table> ::const_iterator _iter1415;
+ for (_iter1415 = this->success.begin(); _iter1415 != this->success.end(); ++_iter1415)
{
- xfer += (*_iter1406).write(oprot);
+ xfer += (*_iter1415).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10056,14 +10056,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1407;
- ::apache::thrift::protocol::TType _etype1410;
- xfer += iprot->readListBegin(_etype1410, _size1407);
- (*(this->success)).resize(_size1407);
- uint32_t _i1411;
- for (_i1411 = 0; _i1411 < _size1407; ++_i1411)
+ uint32_t _size1416;
+ ::apache::thrift::protocol::TType _etype1419;
+ xfer += iprot->readListBegin(_etype1419, _size1416);
+ (*(this->success)).resize(_size1416);
+ uint32_t _i1420;
+ for (_i1420 = 0; _i1420 < _size1416; ++_i1420)
{
- xfer += (*(this->success))[_i1411].read(iprot);
+ xfer += (*(this->success))[_i1420].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10596,14 +10596,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_names.clear();
- uint32_t _size1412;
- ::apache::thrift::protocol::TType _etype1415;
- xfer += iprot->readListBegin(_etype1415, _size1412);
- this->tbl_names.resize(_size1412);
- uint32_t _i1416;
- for (_i1416 = 0; _i1416 < _size1412; ++_i1416)
+ uint32_t _size1421;
+ ::apache::thrift::protocol::TType _etype1424;
+ xfer += iprot->readListBegin(_etype1424, _size1421);
+ this->tbl_names.resize(_size1421);
+ uint32_t _i1425;
+ for (_i1425 = 0; _i1425 < _size1421; ++_i1425)
{
- xfer += iprot->readString(this->tbl_names[_i1416]);
+ xfer += iprot->readString(this->tbl_names[_i1425]);
}
xfer += iprot->readListEnd();
}
@@ -10636,10 +10636,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(:
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
- std::vector<std::string> ::const_iterator _iter1417;
- for (_iter1417 = this->tbl_names.begin(); _iter1417 != this->tbl_names.end(); ++_iter1417)
+ std::vector<std::string> ::const_iterator _iter1426;
+ for (_iter1426 = this->tbl_names.begin(); _iter1426 != this->tbl_names.end(); ++_iter1426)
{
- xfer += oprot->writeString((*_iter1417));
+ xfer += oprot->writeString((*_iter1426));
}
xfer += oprot->writeListEnd();
}
@@ -10667,10 +10667,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write(
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
- std::vector<std::string> ::const_iterator _iter1418;
- for (_iter1418 = (*(this->tbl_names)).begin(); _iter1418 != (*(this->tbl_names)).end(); ++_iter1418)
+ std::vector<std::string> ::const_iterator _iter1427;
+ for (_iter1427 = (*(this->tbl_names)).begin(); _iter1427 != (*(this->tbl_names)).end(); ++_iter1427)
{
- xfer += oprot->writeString((*_iter1418));
+ xfer += oprot->writeString((*_iter1427));
}
xfer += oprot->writeListEnd();
}
@@ -10711,17 +10711,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read(
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size1419;
- ::apache::thrift::protocol::TType _ktype1420;
- ::apache::thrift::protocol::TType _vtype1421;
- xfer += iprot->readMapBegin(_ktype1420, _vtype1421, _size1419);
- uint32_t _i1423;
- for (_i1423 = 0; _i1423 < _size1419; ++_i1423)
+ uint32_t _size1428;
+ ::apache::thrift::protocol::TType _ktype1429;
+ ::apache::thrift::protocol::TType _vtype1430;
+ xfer += iprot->readMapBegin(_ktype1429, _vtype1430, _size1428);
+ uint32_t _i1432;
+ for (_i1432 = 0; _i1432 < _size1428; ++_i1432)
{
- std::string _key1424;
- xfer += iprot->readString(_key1424);
- Materialization& _val1425 = this->success[_key1424];
- xfer += _val1425.read(iprot);
+ std::string _key1433;
+ xfer += iprot->readString(_key1433);
+ Materialization& _val1434 = this->success[_key1433];
+ xfer += _val1434.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -10776,11 +10776,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::map<std::string, Materialization> ::const_iterator _iter1426;
- for (_iter1426 = this->success.begin(); _iter1426 != this->success.end(); ++_iter1426)
+ std::map<std::string, Materialization> ::const_iterator _iter1435;
+ for (_iter1435 = this->success.begin(); _iter1435 != this->success.end(); ++_iter1435)
{
- xfer += oprot->writeString(_iter1426->first);
- xfer += _iter1426->second.write(oprot);
+ xfer += oprot->writeString(_iter1435->first);
+ xfer += _iter1435->second.write(oprot);
}
xfer += oprot->writeMapEnd();
}
@@ -10833,17 +10833,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size1427;
- ::apache::thrift::protocol::TType _ktype1428;
- ::apache::thrift::protocol::TType _vtype1429;
- xfer += iprot->readMapBegin(_ktype1428, _vtype1429, _size1427);
- uint32_t _i1431;
- for (_i1431 = 0; _i1431 < _size1427; ++_i1431)
+ uint32_t _size1436;
+ ::apache::thrift::protocol::TType _ktype1437;
+ ::apache::thrift::protocol::TType _vtype1438;
+ xfer += iprot->readMapBegin(_ktype1437, _vtype1438, _size1436);
+ uint32_t _i1440;
+ for (_i1440 = 0; _i1440 < _size1436; ++_i1440)
{
- std::string _key1432;
- xfer += iprot->readString(_key1432);
- Materialization& _val1433 = (*(this->success))[_key1432];
- xfer += _val1433.read(iprot);
+ std::string _key1441;
+ xfer += iprot->readString(_key1441);
+ Materialization& _val1442 = (*(this->success))[_key1441];
+ xfer += _val1442.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -11304,14 +11304,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1434;
- ::apache::thrift::protocol::TType _etype1437;
- xfer += iprot->readListBegin(_etype1437, _size1434);
- this->success.resize(_size1434);
- uint32_t _i1438;
- for (_i1438 = 0; _i1438 < _size1434; ++_i1438)
+ uint32_t _size1443;
+ ::apache::thrift::protocol::TType _etype1446;
+ xfer += iprot->readListBegin(_etype1446, _size1443);
+ this->success.resize(_size1443);
+ uint32_t _i1447;
+ for (_i1447 = 0; _i1447 < _size1443; ++_i1447)
{
- xfer += iprot->readString(this->success[_i1438]);
+ xfer += iprot->readString(this->success[_i1447]);
}
xfer += iprot->readListEnd();
}
@@ -11366,10 +11366,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1439;
- for (_iter1439 = this->success.begin(); _iter1439 != this->success.end(); ++_iter1439)
+ std::vector<std::string> ::const_iterator _iter1448;
+ for (_iter1448 = this->success.begin(); _iter1448 != this->success.end(); ++_iter1448)
{
- xfer += oprot->writeString((*_iter1439));
+ xfer += oprot->writeString((*_iter1448));
}
xfer += oprot->writeListEnd();
}
@@ -11422,14 +11422,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1440;
- ::apache::thrift::protocol::TType _etype1443;
- xfer += iprot->readListBegin(_etype1443, _size1440);
- (*(this->success)).resize(_size1440);
- uint32_t _i1444;
- for (_i1444 = 0; _i1444 < _size1440; ++_i1444)
+ uint32_t _size1449;
+ ::apache::thrift::protocol::TType _etype1452;
+ xfer += iprot->readListBegin(_etype1452, _size1449);
+ (*(this->success)).resize(_size1449);
+ uint32_t _i1453;
+ for (_i1453 = 0; _i1453 < _size1449; ++_i1453)
{
- xfer += iprot->readString((*(this->success))[_i1444]);
+ xfer += iprot->readString((*(this->success))[_i1453]);
}
xfer += iprot->readListEnd();
}
@@ -12763,14 +12763,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size1445;
- ::apache::thrift::protocol::TType _etype1448;
- xfer += iprot->readListBegin(_etype1448, _size1445);
- this->new_parts.resize(_size1445);
- uint32_t _i1449;
- for (_i1449 = 0; _i1449 < _size1445; ++_i1449)
+ uint32_t _size1454;
+ ::apache::thrift::protocol::TType _etype1457;
+ xfer += iprot->readListBegin(_etype1457, _size1454);
+ this->new_parts.resize(_size1454);
+ uint32_t _i1458;
+ for (_i1458 = 0; _i1458 < _size1454; ++_i1458)
{
- xfer += this->new_parts[_i1449].read(iprot);
+ xfer += this->new_parts[_i1458].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -12799,10 +12799,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<Partition> ::const_iterator _iter1450;
- for (_iter1450 = this->new_parts.begin(); _iter1450 != this->new_parts.end(); ++_iter1450)
+ std::vector<Partition> ::const_iterator _iter1459;
+ for (_iter1459 = this->new_parts.begin(); _iter1459 != this->new_parts.end(); ++_iter1459)
{
- xfer += (*_iter1450).write(oprot);
+ xfer += (*_iter1459).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -12826,10 +12826,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<Partition> ::const_iterator _iter1451;
- for (_iter1451 = (*(this->new_parts)).begin(); _iter1451 != (*(this->new_parts)).end(); ++_iter1451)
+ std::vector<Partition> ::const_iterator _iter1460;
+ for (_iter1460 = (*(this->new_parts)).begin(); _iter1460 != (*(this->new_parts)).end(); ++_iter1460)
{
- xfer += (*_iter1451).write(oprot);
+ xfer += (*_iter1460).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -13038,14 +13038,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size1452;
- ::apache::thrift::protocol::TType _etype1455;
- xfer += iprot->readListBegin(_etype1455, _size1452);
- this->new_parts.resize(_size1452);
- uint32_t _i1456;
- for (_i1456 = 0; _i1456 < _size1452; ++_i1456)
+ uint32_t _size1461;
+ ::apache::thrift::protocol::TType _etype1464;
+ xfer += iprot->readListBegin(_etype1464, _size1461);
+ this->new_parts.resize(_size1461);
+ uint32_t _i1465;
+ for (_i1465 = 0; _i1465 < _size1461; ++_i1465)
{
- xfer += this->new_parts[_i1456].read(iprot);
+ xfer += this->new_parts[_i1465].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -13074,10 +13074,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<PartitionSpec> ::const_iterator _iter1457;
- for (_iter1457 = this->new_parts.begin(); _iter1457 != this->new_parts.end(); ++_iter1457)
+ std::vector<PartitionSpec> ::const_iterator _iter1466;
+ for (_iter1466 = this->new_parts.begin(); _iter1466 != this->new_parts.end(); ++_iter1466)
{
- xfer += (*_iter1457).write(oprot);
+ xfer += (*_iter1466).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -13101,10 +13101,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<PartitionSpec> ::const_iterator _iter1458;
- for (_iter1458 = (*(this->new_parts)).begin(); _iter1458 != (*(this->new_parts)).end(); ++_iter1458)
+ std::vector<PartitionSpec> ::const_iterator _iter1467;
+ for (_iter1467 = (*(this->new_parts)).begin(); _iter1467 != (*(this->new_parts)).end(); ++_iter1467)
{
- xfer += (*_iter1458).write(oprot);
+ xfer += (*_iter1467).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -13329,14 +13329,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1459;
- ::apache::thrift::protocol::TType _etype1462;
- xfer += iprot->readListBegin(_etype1462, _size1459);
- this->part_vals.resize(_size1459);
- uint32_t _i1463;
- for (_i1463 = 0; _i1463 < _size1459; ++_i1463)
+ uint32_t _size1468;
+ ::apache::thrift::protocol::TType _etype1471;
+ xfer += iprot->readListBegin(_etype1471, _size1468);
+ this->part_vals.resize(_size1468);
+ uint32_t _i1472;
+ for (_i1472 = 0; _i1472 < _size1468; ++_i1472)
{
- xfer += iprot->readString(this->part_vals[_i1463]);
+ xfer += iprot->readString(this->part_vals[_i1472]);
}
xfer += iprot->readListEnd();
}
@@ -13373,10 +13373,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1464;
- for (_iter1464 = this->part_vals.begin(); _iter1464 != this->part_vals.end(); ++_iter1464)
+ std::vector<std::string> ::const_iterator _iter1473;
+ for (_iter1473 = this->part_vals.begin(); _iter1473 != this->part_vals.end(); ++_iter1473)
{
- xfer += oprot->writeString((*_iter1464));
+ xfer += oprot->writeString((*_iter1473));
}
xfer += oprot->writeListEnd();
}
@@ -13408,10 +13408,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1465;
- for (_iter1465 = (*(this->part_vals)).begin(); _iter1465 != (*(this->part_vals)).end(); ++_iter1465)
+ std::vector<std::string> ::const_iterator _iter1474;
+ for (_iter1474 = (*(this->part_vals)).begin(); _iter1474 != (*(this->part_vals)).end(); ++_iter1474)
{
- xfer += oprot->writeString((*_iter1465));
+ xfer += oprot->writeString((*_iter1474));
}
xfer += oprot->writeListEnd();
}
@@ -13883,14 +13883,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1466;
- ::apache::thrift::protocol::TType _etype1469;
- xfer += iprot->readListBegin(_etype1469, _size1466);
- this->part_vals.resize(_size1466);
- uint32_t _i1470;
- for (_i1470 = 0; _i1470 < _size1466; ++_i1470)
+ uint32_t _size1475;
+ ::apache::thrift::protocol::TType _etype1478;
+ xfer += iprot->readListBegin(_etype1478, _size1475);
+ this->part_vals.resize(_size1475);
+ uint32_t _i1479;
+ for (_i1479 = 0; _i1479 < _size1475; ++_i1479)
{
- xfer += iprot->readString(this->part_vals[_i1470]);
+ xfer += iprot->readString(this->part_vals[_i1479]);
}
xfer += iprot->readListEnd();
}
@@ -13935,10 +13935,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1471;
- for (_iter1471 = this->part_vals.begin(); _iter1471 != this->part_vals.end(); ++_iter1471)
+ std::vector<std::string> ::const_iterator _iter1480;
+ for (_iter1480 = this->part_vals.begin(); _iter1480 != this->part_vals.end(); ++_iter1480)
{
- xfer += oprot->writeString((*_iter1471));
+ xfer += oprot->writeString((*_iter1480));
}
xfer += oprot->writeListEnd();
}
@@ -13974,10 +13974,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1472;
- for (_iter1472 = (*(this->part_vals)).begin(); _iter1472 != (*(this->part_vals)).end(); ++_iter1472)
+ std::vector<std::string> ::const_iterator _iter1481;
+ for (_iter1481 = (*(this->part_vals)).begin(); _iter1481 != (*(this->part_vals)).end(); ++_iter1481)
{
- xfer += oprot->writeString((*_iter1472));
+ xfer += oprot->writeString((*_iter1481));
}
xfer += oprot->writeListEnd();
}
@@ -14780,14 +14780,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1473;
- ::apache::thrift::protocol::TType _etype1476;
- xfer += iprot->readListBegin(_etype1476, _size1473);
- this->part_vals.resize(_size1473);
- uint32_t _i1477;
- for (_i1477 = 0; _i1477 < _size1473; ++_i1477)
+ uint32_t _size1482;
+ ::apache::thrift::protocol::TType _etype1485;
+ xfer += iprot->readListBegin(_etype1485, _size1482);
+ this->part_vals.resize(_size1482);
+ uint32_t _i1486;
+ for (_i1486 = 0; _i1486 < _size1482; ++_i1486)
{
- xfer += iprot->readString(this->part_vals[_i1477]);
+ xfer += iprot->readString(this->part_vals[_i1486]);
}
xfer += iprot->readListEnd();
}
@@ -14832,10 +14832,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1478;
- for (_iter1478 = this->part_vals.begin(); _iter1478 != this->part_vals.end(); ++_iter1478)
+ std::vector<std::string> ::const_iterator _iter1487;
+ for (_iter1487 = this->part_vals.begin(); _iter1487 != this->part_vals.end(); ++_iter1487)
{
- xfer += oprot->writeString((*_iter1478));
+ xfer += oprot->writeString((*_iter1487));
}
xfer += oprot->writeListEnd();
}
@@ -14871,10 +14871,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1479;
- for (_iter1479 = (*(this->part_vals)).begin(); _iter1479 != (*(this->part_vals)).end(); ++_iter1479)
+ std::vector<std::string> ::const_iterator _iter1488;
+ for (_iter1488 = (*(this->part_vals)).begin(); _iter1488 != (*(this->part_vals)).end(); ++_iter1488)
{
- xfer += oprot->writeString((*_iter1479));
+ xfer += oprot->writeString((*_iter1488));
}
xfer += oprot->writeListEnd();
}
@@ -15083,14 +15083,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1480;
- ::apache::thrift::protocol::TType _etype1483;
- xfer += iprot->readListBegin(_etype1483, _size1480);
- this->part_vals.resize(_size1480);
- uint32_t _i1484;
- for (_i1484 = 0; _i1484 < _size1480; ++_i1484)
+ uint32_t _size1489;
+ ::apache::thrift::protocol::TType _etype1492;
+ xfer += iprot->readListBegin(_etype1492, _size1489);
+ this->part_vals.resize(_size1489);
+ uint32_t _i1493;
+ for (_i1493 = 0; _i1493 < _size1489; ++_i1493)
{
- xfer += iprot->readString(this->part_vals[_i1484]);
+ xfer += iprot->readString(this->part_vals[_i1493]);
}
xfer += iprot->readListEnd();
}
@@ -15143,10 +15143,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1485;
- for (_iter1485 = this->part_vals.begin(); _iter1485 != this->part_vals.end(); ++_iter1485)
+ std::vector<std::string> ::const_iterator _iter1494;
+ for (_iter1494 = this->part_vals.begin(); _iter1494 != this->part_vals.end(); ++_iter1494)
{
- xfer += oprot->writeString((*_iter1485));
+ xfer += oprot->writeString((*_iter1494));
}
xfer += oprot->writeListEnd();
}
@@ -15186,10 +15186,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1486;
- for (_iter1486 = (*(this->part_vals)).begin(); _iter1486 != (*(this->part_vals)).end(); ++_iter1486)
+ std::vector<std::string> ::const_iterator _iter1495;
+ for (_iter1495 = (*(this->part_vals)).begin(); _iter1495 != (*(this->part_vals)).end(); ++_iter1495)
{
- xfer += oprot->writeString((*_iter1486));
+ xfer += oprot->writeString((*_iter1495));
}
xfer += oprot->writeListEnd();
}
@@ -16195,14 +16195,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1487;
- ::apache::thrift::protocol::TType _etype1490;
- xfer += iprot->readListBegin(_etype1490, _size1487);
- this->part_vals.resize(_size1487);
- uint32_t _i1491;
- for (_i1491 = 0; _i1491 < _size1487; ++_i1491)
+ uint32_t _size1496;
+ ::apache::thrift::protocol::TType _etype1499;
+ xfer += iprot->readListBegin(_etype1499, _size1496);
+ this->part_vals.resize(_size1496);
+ uint32_t _i1500;
+ for (_i1500 = 0; _i1500 < _size1496; ++_i1500)
{
- xfer += iprot->readString(this->part_vals[_i1491]);
+ xfer += iprot->readString(this->part_vals[_i1500]);
}
xfer += iprot->readListEnd();
}
@@ -16239,10 +16239,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1492;
- for (_iter1492 = this->part_vals.begin(); _iter1492 != this->part_vals.end(); ++_iter1492)
+ std::vector<std::string> ::const_iterator _iter1501;
+ for (_iter1501 = this->part_vals.begin(); _iter1501 != this->part_vals.end(); ++_iter1501)
{
- xfer += oprot->writeString((*_iter1492));
+ xfer += oprot->writeString((*_iter1501));
}
xfer += oprot->writeListEnd();
}
@@ -16274,10 +16274,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1493;
- for (_iter1493 = (*(this->part_vals)).begin(); _iter1493 != (*(this->part_vals)).end(); ++_iter1493)
+ std::vector<std::string> ::const_iterator _iter1502;
+ for (_iter1502 = (*(this->part_vals)).begin(); _iter1502 != (*(this->part_vals)).end(); ++_iter1502)
{
- xfer += oprot->writeString((*_iter1493));
+ xfer += oprot->writeString((*_iter1502));
}
xfer += oprot->writeListEnd();
<TRUNCATED>
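All of the C++ hunks above are mechanical renames: the Thrift compiler draws its scratch identifiers (_size, _etype, _i, _iter, _key, _val) from one global counter, so adding fields elsewhere in the IDL shifts every suffix by nine (e.g. _i1369 becomes _i1378) while the generated logic is untouched. Each hunk instantiates the same generated list read/write pattern; here is a minimal Java analogue of the read side (illustrative only -- the class and method names are hypothetical, though this patch regenerates the Java bindings as well):

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

// Sketch of the generated deserialization loop; plays the role of the
// _size13xx/_i13xx temporaries in the C++ hunks above.
public final class ThriftListReadSketch {
  static List<String> readStringList(TProtocol iprot) throws TException {
    TList meta = iprot.readListBegin();       // element type + size header
    List<String> out = new ArrayList<>(meta.size);
    for (int i = 0; i < meta.size; ++i) {     // the renumbered loop variable
      out.add(iprot.readString());
    }
    iprot.readListEnd();
    return out;
  }
}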
[13/13] hive git commit: HIVE-19532: 03 patch
Posted by se...@apache.org.
HIVE-19532: 03 patch
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/be303958
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/be303958
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/be303958
Branch: refs/heads/master-txnstats
Commit: be30395873a007d377522d8d10a604ee14daf4ef
Parents: 5a9a328
Author: sergey <se...@apache.org>
Authored: Fri Jun 15 13:09:35 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Fri Jun 15 13:09:35 2018 -0700
----------------------------------------------------------------------
.../listener/DummyRawStoreFailEvent.java | 45 +-
pom.xml | 2 +-
.../hive/ql/exec/ColumnStatsUpdateTask.java | 3 +
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 3 +-
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 129 +-
.../hadoop/hive/ql/lockmgr/DbTxnManager.java | 13 +
.../hadoop/hive/ql/lockmgr/DummyTxnManager.java | 6 +
.../hadoop/hive/ql/lockmgr/HiveTxnManager.java | 10 +
.../apache/hadoop/hive/ql/metadata/Hive.java | 296 +-
.../hive/ql/optimizer/StatsOptimizer.java | 50 +-
.../hive/ql/stats/BasicStatsNoJobTask.java | 4 +-
.../hadoop/hive/ql/stats/BasicStatsTask.java | 15 +-
.../hadoop/hive/ql/stats/ColStatsProcessor.java | 7 +
.../test/queries/clientpositive/stats_nonpart.q | 53 +
ql/src/test/queries/clientpositive/stats_part.q | 98 +
.../test/queries/clientpositive/stats_part2.q | 100 +
.../test/queries/clientpositive/stats_sizebug.q | 37 +
.../results/clientpositive/stats_nonpart.q.out | 325 +
.../results/clientpositive/stats_part.q.out | 650 ++
.../results/clientpositive/stats_part2.q.out | 1598 +++++
.../results/clientpositive/stats_sizebug.q.out | 210 +
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 2342 +++----
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 36 +-
.../ThriftHiveMetastore_server.skeleton.cpp | 2 +-
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 6099 ++++++++++--------
.../gen/thrift/gen-cpp/hive_metastore_types.h | 297 +-
.../metastore/api/AddPartitionsRequest.java | 215 +-
.../hive/metastore/api/AddPartitionsResult.java | 126 +-
.../hadoop/hive/metastore/api/AggrStats.java | 124 +-
.../hive/metastore/api/ColumnStatistics.java | 335 +-
.../hive/metastore/api/GetTableRequest.java | 219 +-
.../hive/metastore/api/GetTableResult.java | 124 +-
.../metastore/api/IsolationLevelCompliance.java | 48 +
.../hadoop/hive/metastore/api/Partition.java | 333 +-
.../hive/metastore/api/PartitionSpec.java | 337 +-
.../metastore/api/PartitionsStatsRequest.java | 219 +-
.../metastore/api/PartitionsStatsResult.java | 124 +-
.../api/SetPartitionsStatsRequest.java | 215 +-
.../apache/hadoop/hive/metastore/api/Table.java | 333 +-
.../hive/metastore/api/TableStatsRequest.java | 219 +-
.../hive/metastore/api/TableStatsResult.java | 124 +-
.../hive/metastore/api/ThriftHiveMetastore.java | 242 +-
.../gen-php/metastore/ThriftHiveMetastore.php | 58 +-
.../src/gen/thrift/gen-php/metastore/Types.php | 632 ++
.../hive_metastore/ThriftHiveMetastore-remote | 8 +-
.../hive_metastore/ThriftHiveMetastore.py | 44 +-
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 396 +-
.../gen/thrift/gen-rb/hive_metastore_types.rb | 117 +-
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 16 +-
.../hadoop/hive/metastore/AlterHandler.java | 2 +-
.../hadoop/hive/metastore/HiveAlterHandler.java | 20 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 100 +-
.../hive/metastore/HiveMetaStoreClient.java | 112 +-
.../hadoop/hive/metastore/IHMSHandler.java | 5 +
.../hadoop/hive/metastore/IMetaStoreClient.java | 45 +-
.../hadoop/hive/metastore/ObjectStore.java | 462 +-
.../apache/hadoop/hive/metastore/RawStore.java | 150 +-
.../hive/metastore/cache/CachedStore.java | 132 +-
.../hive/metastore/conf/MetastoreConf.java | 4 +-
.../hadoop/hive/metastore/model/MPartition.java | 18 +-
.../model/MPartitionColumnStatistics.java | 9 +
.../hadoop/hive/metastore/model/MTable.java | 19 +
.../metastore/model/MTableColumnStatistics.java | 9 +
.../metastore/txn/CompactionTxnHandler.java | 64 +-
.../hadoop/hive/metastore/txn/TxnDbUtil.java | 94 +
.../hadoop/hive/metastore/txn/TxnHandler.java | 8 +-
.../hadoop/hive/metastore/txn/TxnUtils.java | 20 +-
.../src/main/resources/package.jdo | 18 +
.../main/sql/derby/hive-schema-3.0.0.derby.sql | 11 +-
.../main/sql/derby/hive-schema-4.0.0.derby.sql | 10 +-
.../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql | 8 +-
.../main/sql/mssql/hive-schema-3.0.0.mssql.sql | 14 +-
.../main/sql/mssql/hive-schema-4.0.0.mssql.sql | 14 +-
.../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql | 8 +
.../main/sql/mysql/hive-schema-3.0.0.mysql.sql | 6 +
.../main/sql/mysql/hive-schema-4.0.0.mysql.sql | 6 +
.../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql | 2 +-
.../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql | 8 +
.../sql/oracle/hive-schema-3.0.0.oracle.sql | 15 +-
.../sql/oracle/hive-schema-4.0.0.oracle.sql | 14 +-
.../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql | 2 +-
.../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql | 7 +
.../sql/postgres/hive-schema-3.0.0.postgres.sql | 19 +-
.../sql/postgres/hive-schema-4.0.0.postgres.sql | 14 +-
.../upgrade-3.1.0-to-4.0.0.postgres.sql | 8 +
.../src/main/thrift/hive_metastore.thrift | 61 +-
.../DummyRawStoreControlledCommit.java | 104 +-
.../DummyRawStoreForJdoConnection.java | 99 +-
.../HiveMetaStoreClientPreCatalog.java | 96 +-
.../metastore/client/TestAlterPartitions.java | 3 +-
.../hadoop/hive/common/ValidTxnWriteIdList.java | 4 +
91 files changed, 14212 insertions(+), 4650 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 8f9a03f..498b2c6 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -266,6 +266,12 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public Table getTable(String catName, String dbName, String tableName,
+ long txnId, String writeIdList) throws MetaException {
+ return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
+ }
+
+ @Override
public boolean addPartition(Partition part)
throws InvalidObjectException, MetaException {
return objectStore.addPartition(part);
@@ -278,6 +284,13 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public Partition getPartition(String catName, String dbName, String tableName,
+ List<String> partVals, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
+ }
+
+ @Override
public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
throws MetaException, NoSuchObjectException,
InvalidObjectException, InvalidInputException {
@@ -376,9 +389,10 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbName, String tblName,
- List<List<String>> partValsList, List<Partition> newParts)
+ List<List<String>> partValsList, List<Partition> newParts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
- objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+ objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList);
}
@Override
@@ -685,6 +699,14 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+ List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -778,6 +800,17 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+ String tblName, List<String> colNames,
+ List<String> partNames,
+ long txnId,
+ String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionColumnStatistics(
+ catName, dbName, tblName , colNames, partNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
List<FieldSchema> partKeys, List<String> partVals)
throws MetaException, NoSuchObjectException {
@@ -855,6 +888,14 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public AggrStats get_aggr_stats_for(String catName, String dbName,
+ String tblName, List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException {
+ return null;
+ }
+
+ @Override
public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
return objectStore.getNextNotification(rqst);
}
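The overrides above track a broader API change: every snapshot-sensitive RawStore read (and alterPartitions) gains a trailing (txnId, writeIdList) pair, so the metastore can answer against the caller's transactional snapshot. A condensed, hypothetical view of that read surface (this interface is not in the patch; it just collects the new overloads shown above so the common tail is visible):

import java.util.List;
import org.apache.hadoop.hive.metastore.api.*;

// Hypothetical summary interface assembled from the delegations above.
interface SnapshotAwareReads {
  Table getTable(String catName, String dbName, String tableName,
      long txnId, String writeIdList) throws MetaException;
  Partition getPartition(String catName, String dbName, String tableName,
      List<String> partVals, long txnId, String writeIdList)
      throws MetaException, NoSuchObjectException;
  ColumnStatistics getTableColumnStatistics(String catName, String dbName,
      String tableName, List<String> colNames, long txnId, String writeIdList)
      throws MetaException, NoSuchObjectException;
  AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
      List<String> partNames, List<String> colNames, long txnId,
      String writeIdList) throws MetaException;
}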
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 5202248..4278104 100644
--- a/pom.xml
+++ b/pom.xml
@@ -66,7 +66,7 @@
</modules>
<properties>
- <hive.version.shortname>3.1.0</hive.version.shortname>
+ <hive.version.shortname>4.0.0</hive.version.shortname>
<!-- Build Properties -->
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
index a53ff5a..7795c66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
@@ -46,11 +46,14 @@ import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index e069499..e82fb10 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1301,8 +1301,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
throw new AssertionError("Unsupported alter materialized view type! : " + alterMVDesc.getOp());
}
- db.alterTable(mv, environmentContext);
-
+ db.alterTable(mv,environmentContext);
return 0;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 7fce67f..1961313 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -33,6 +33,7 @@ import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
+import org.apache.avro.generic.GenericData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,13 +41,11 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
-import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
-import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
+import org.apache.hadoop.hive.common.*;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
+import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -57,9 +56,12 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.hadoop.hive.ql.io.orc.Writer;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
import org.apache.hadoop.hive.shims.ShimLoader;
@@ -1621,6 +1623,121 @@ public class AcidUtils {
}
}
+ public static class TableSnapshot {
+ private long txnId;
+ private String validWriteIdList;
+
+ public TableSnapshot() {
+ }
+
+ public TableSnapshot(long txnId, String validWriteIdList) {
+ this.txnId = txnId;
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public String getValidWriteIdList() {
+ return validWriteIdList;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+ }
+
+ /**
+ * Create a TableSnapshot with the given "conf"
+ * for the given table "tbl".
+ *
+ * @param conf
+ * @param tbl
+ * @return TableSnapshot on success, null on failure
+ * @throws LockException
+ */
+ public static TableSnapshot getTableSnapshot(
+ Configuration conf,
+ Table tbl) throws LockException {
+ if (!isTransactionalTable(tbl)) {
+ return null;
+ } else {
+ long txnId = 0;
+ ValidWriteIdList validWriteIdList = null;
+
+ HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr();
+
+ if (sessionTxnMgr != null) {
+ txnId = sessionTxnMgr.getCurrentTxnId();
+ }
+ String fullTableName = getFullTableName(tbl.getDbName(), tbl.getTableName());
+ if (txnId > 0) {
+ validWriteIdList =
+ getTableValidWriteIdList(conf, fullTableName);
+
+ if (validWriteIdList == null) {
+ validWriteIdList = getTableValidWriteIdListWithTxnList(
+ conf, tbl.getDbName(), tbl.getTableName());
+ }
+ }
+ return new TableSnapshot(txnId,
+ validWriteIdList != null ? validWriteIdList.toString() : null);
+ }
+ }
+
+ /**
+ * Returns ValidWriteIdList for the table with the given "dbName" and "tableName".
+ * This is called when HiveConf has no list for the table.
+ * Otherwise use getTableSnapshot().
+ * @param conf Configuration
+ * @param dbName
+ * @param tableName
+ * @return ValidWriteIdList on success, null on failure to get a list.
+ * @throws LockException
+ */
+ public static ValidWriteIdList getTableValidWriteIdListWithTxnList(
+ Configuration conf, String dbName, String tableName) throws LockException {
+ HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr();
+ if (sessionTxnMgr == null) {
+ return null;
+ }
+ ValidWriteIdList validWriteIdList = null;
+ ValidTxnWriteIdList validTxnWriteIdList = null;
+
+ String validTxnList = conf.get(ValidTxnList.VALID_TXNS_KEY);
+ List<String> tablesInput = new ArrayList<>();
+ String fullTableName = getFullTableName(dbName, tableName);
+ tablesInput.add(fullTableName);
+
+ validTxnWriteIdList = sessionTxnMgr.getValidWriteIds(tablesInput, validTxnList);
+ return validTxnWriteIdList != null ?
+ validTxnWriteIdList.getTableValidWriteIdList(fullTableName) : null;
+ }
+
+ /**
+ * Returns the table writeId for the table with the given "dbName.tableName"
+ * for the current transaction.
+ * @param conf
+ * @param dbName
+ * @param tableName
+ * @return 0 if the current transaction does not write to the table
+ * @throws LockException
+ */
+ public static long getAllocatedTableWriteId(
+ Configuration conf, String dbName, String tableName)
+ throws LockException {
+ HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr();
+ long writeId = 0;
+ if (sessionTxnMgr != null) {
+ writeId = sessionTxnMgr.getAllocatedTableWriteId(dbName, tableName);
+ }
+ return writeId;
+ }
public static String getFullTableName(String dbName, String tableName) {
return dbName.toLowerCase() + "." + tableName.toLowerCase();
}
@@ -1908,8 +2025,8 @@ public class AcidUtils {
}
public static boolean isAcidEnabled(HiveConf hiveConf) {
- String txnMgr = hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER);
- boolean concurrency = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
+ String txnMgr = hiveConf.getVar(ConfVars.HIVE_TXN_MANAGER);
+ boolean concurrency = hiveConf.getBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY);
String dbTxnMgr = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager";
if (txnMgr.equals(dbTxnMgr) && concurrency) {
return true;
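The new helpers above are the core of the snapshot plumbing: getTableSnapshot resolves the caller's txnId and ValidWriteIdList once, and those two values are exactly what the new metastore signatures expect. A minimal usage sketch (hypothetical caller code, not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.Table;

public final class SnapshotUsageSketch {
  static void describe(Configuration conf, Table tbl) throws LockException {
    AcidUtils.TableSnapshot snap = AcidUtils.getTableSnapshot(conf, tbl);
    if (snap == null) {
      return; // non-transactional table: nothing to enforce
    }
    long txnId = snap.getTxnId();                  // 0 when no txn is open
    String writeIds = snap.getValidWriteIdList();  // may be null
    // These two values feed the new overloads added in this patch,
    // e.g. getTable(catName, dbName, tableName, txnId, writeIds).
  }
}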
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 4fd1d4e..d92bb91 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -1017,9 +1017,22 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
@Override
public long getTableWriteId(String dbName, String tableName) throws LockException {
assert isTxnOpen();
+ return getTableWriteId(dbName, tableName, true);
+ }
+
+ @Override
+ public long getAllocatedTableWriteId(String dbName, String tableName) throws LockException {
+ assert isTxnOpen();
+ return getTableWriteId(dbName, tableName, false);
+ }
+
+ private long getTableWriteId(
+ String dbName, String tableName, boolean allocateIfNotYet) throws LockException {
String fullTableName = AcidUtils.getFullTableName(dbName, tableName);
if (tableWriteIds.containsKey(fullTableName)) {
return tableWriteIds.get(fullTableName);
+ } else if (!allocateIfNotYet) {
+ return 0;
}
try {
long writeId = getMS().allocateTableWriteId(txnId, dbName, tableName);
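The refactoring above splits one write-ID cache into two behaviors: getTableWriteId allocates an ID on first use, while the new getAllocatedTableWriteId only peeks and reports 0 when nothing has been allocated yet. A hedged sketch of that contract (assumes an open transaction; database and table names are illustrative):

import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;

final class WriteIdContractSketch {
  static void demo(HiveTxnManager txnMgr) throws LockException {
    long before = txnMgr.getAllocatedTableWriteId("db1", "t1"); // 0: not yet allocated
    long id = txnMgr.getTableWriteId("db1", "t1");              // allocates via the metastore
    long after = txnMgr.getAllocatedTableWriteId("db1", "t1");  // returns the cached id
    assert before == 0 && after == id;
  }
}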
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
index ab9d67e..2398419 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
@@ -77,6 +77,12 @@ class DummyTxnManager extends HiveTxnManagerImpl {
public long getTableWriteId(String dbName, String tableName) throws LockException {
return 0L;
}
+
+ @Override
+ public long getAllocatedTableWriteId(String dbName, String tableName) throws LockException {
+ return 0L;
+ }
+
@Override
public void replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy,
List<TxnToWriteId> srcTxnToWriteIdList) throws LockException {
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
index 5f68e08..28e2ac5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
@@ -277,6 +277,16 @@ public interface HiveTxnManager {
*/
long getTableWriteId(String dbName, String tableName) throws LockException;
+ /**
+ * If {@code isTxnOpen()}, returns the already allocated table write ID of the table with
+ * the given "dbName.tableName" for the current active transaction.
+ * @param dbName
+ * @param tableName
+ * @return 0 if not yet allocated
+ * @throws LockException
+ */
+ public long getAllocatedTableWriteId(String dbName, String tableName) throws LockException;
+
/**
* Allocates write id for each transaction in the list.
* @param dbName database name
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 2ec131e..3fc4649 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -63,21 +63,13 @@ import javax.jdo.JDODataStoreException;
import com.google.common.collect.ImmutableList;
import org.apache.calcite.plan.RelOptMaterialization;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.RelVisitor;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.tools.RelBuilder;
import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
@@ -87,13 +79,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
+import org.apache.hadoop.hive.common.*;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
import org.apache.hadoop.hive.common.log.InPlaceUpdate;
@@ -114,60 +100,7 @@ import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.ReplChangeManager;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CompactionResponse;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
-import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.Materialization;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator;
@@ -180,7 +113,6 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.log.PerfLogger;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule;
import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
@@ -202,7 +134,6 @@ import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hive.common.util.TxnIdUtils;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -652,6 +583,12 @@ public class Hive {
alterTable(newTbl.getDbName(), newTbl.getTableName(), newTbl, false, environmentContext);
}
+
+ public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext)
+ throws HiveException {
+ alterTable(fullyQlfdTblName, newTbl, false, environmentContext);
+ }
+
/**
* Updates the existing table metadata with the new metadata.
*
@@ -659,13 +596,17 @@ public class Hive {
* name of the existing table
* @param newTbl
* new name of the table. could be the old name
+ * @param transactional
+ * whether to generate a table snapshot and save it to the metastore
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
* @throws TException
*/
- public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext)
+ public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext,
+ boolean transactional)
throws HiveException {
- alterTable(fullyQlfdTblName, newTbl, false, environmentContext);
+ String[] names = Utilities.getDbTableName(fullyQlfdTblName);
+ alterTable(names[0], names[1], newTbl, false, environmentContext, transactional);
}
public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext)
@@ -673,9 +614,13 @@ public class Hive {
String[] names = Utilities.getDbTableName(fullyQlfdTblName);
alterTable(names[0], names[1], newTbl, cascade, environmentContext);
}
-
public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
- EnvironmentContext environmentContext)
+ EnvironmentContext environmentContext)
+ throws HiveException {
+ alterTable(dbName, tblName, newTbl, cascade, environmentContext, false);
+ }
+ public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
+ EnvironmentContext environmentContext, boolean transactional)
throws HiveException {
try {
@@ -690,6 +635,12 @@ public class Hive {
if (cascade) {
environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
}
+
+ // Take a table snapshot and set it on newTbl.
+ if (transactional) {
+ setTableSnapshotForTransactionalTable(conf, newTbl);
+ }
+
getMSC().alter_table_with_environmentContext(dbName, tblName, newTbl.getTTable(), environmentContext);
} catch (MetaException e) {
throw new HiveException("Unable to alter table. " + e.getMessage(), e);
@@ -739,6 +690,29 @@ public class Hive {
*/
public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
throws InvalidOperationException, HiveException {
+ alterPartition(dbName, tblName, newPart, environmentContext, true);
+ }
+
+ /**
+ * Updates the existing partition metadata with the new metadata.
+ *
+ * @param dbName
+ * name of the existing table's database
+ * @param tblName
+ * name of the existing table
+ * @param newPart
+ * new partition
+ * @param environmentContext
+ * environment context for the method
+ * @param transactional
+ * indicates whether this call should attach a table snapshot for transactional stats
+ * @throws InvalidOperationException
+ * if the changes in metadata are not acceptable
+ * @throws TException
+ */
+ public void alterPartition(String dbName, String tblName, Partition newPart,
+ EnvironmentContext environmentContext, boolean transactional)
+ throws InvalidOperationException, HiveException {
try {
validatePartition(newPart);
String location = newPart.getLocation();
@@ -746,6 +720,9 @@ public class Hive {
location = Utilities.getQualifiedPath(conf, new Path(location));
newPart.setLocation(location);
}
+ if (transactional) {
+ setTableSnapshotForTransactionalPartition(conf, newPart);
+ }
getSynchronizedMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext);
} catch (MetaException e) {
@@ -763,6 +740,10 @@ public class Hive {
newPart.checkValidity();
}
+ public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+ throws InvalidOperationException, HiveException {
+ alterPartitions(tblName, newParts, environmentContext, false);
+ }
/**
* Updates the existing table metadata with the new metadata.
*
@@ -770,16 +751,23 @@ public class Hive {
* name of the existing table
* @param newParts
* new partitions
+ * @param transactional
+ * whether to generate a table snapshot and save it to the metastore
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
* @throws TException
*/
- public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+ public void alterPartitions(String tblName, List<Partition> newParts,
+ EnvironmentContext environmentContext, boolean transactional)
throws InvalidOperationException, HiveException {
String[] names = Utilities.getDbTableName(tblName);
List<org.apache.hadoop.hive.metastore.api.Partition> newTParts =
new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>();
try {
+ AcidUtils.TableSnapshot tableSnapshot = null;
+ if (transactional) {
+ tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable());
+ }
// Remove the DDL time so that it gets refreshed
for (Partition tmpPart: newParts) {
if (tmpPart.getParameters() != null) {
@@ -792,7 +780,9 @@ public class Hive {
}
newTParts.add(tmpPart.getTPartition());
}
- getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext);
+ getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext,
+ tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
} catch (MetaException e) {
throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
} catch (TException e) {
@@ -923,6 +913,8 @@ public class Hive {
tTbl.setPrivileges(principalPrivs);
}
}
+ // Set the table snapshot on the api.Table so that it is persisted.
+ setTableSnapshotForTransactionalTable(conf, tbl);
if (primaryKeys == null && foreignKeys == null
&& uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null
&& checkConstraints == null) {
@@ -1125,7 +1117,27 @@ public class Hive {
* @throws HiveException
*/
public Table getTable(final String dbName, final String tableName,
- boolean throwException) throws HiveException {
+ boolean throwException) throws HiveException {
+ return this.getTable(dbName, tableName, throwException, false);
+ }
+
+ /**
+ * Returns metadata of the table
+ *
+ * @param dbName
+ * the name of the database
+ * @param tableName
+ * the name of the table
+ * @param throwException
+ * controls whether an exception is thrown or null is returned
+ * @param checkTransactional
+ * checks whether the table stats in the metastore are valid for
+ * (i.e., compliant with the snapshot isolation of) the current transaction.
+ * @return the table, or null if throwException is false.
+ * @throws HiveException
+ */
+ public Table getTable(final String dbName, final String tableName,
+ boolean throwException, boolean checkTransactional) throws HiveException {
if (tableName == null || tableName.equals("")) {
throw new HiveException("empty table creation??");
@@ -1134,7 +1146,19 @@ public class Hive {
// Get the table from metastore
org.apache.hadoop.hive.metastore.api.Table tTable = null;
try {
- tTable = getMSC().getTable(dbName, tableName);
+ if (checkTransactional) {
+ ValidWriteIdList validWriteIdList = null;
+ long txnId = SessionState.get().getTxnMgr() != null ?
+ SessionState.get().getTxnMgr().getCurrentTxnId() : 0;
+ if (txnId > 0) {
+ validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf,
+ dbName, tableName);
+ }
+ tTable = getMSC().getTable(dbName, tableName, txnId,
+ validWriteIdList != null ? validWriteIdList.toString() : null);
+ } else {
+ tTable = getMSC().getTable(dbName, tableName);
+ }
} catch (NoSuchObjectException e) {
if (throwException) {
LOG.error("Table " + dbName + "." + tableName + " not found: " + e.getMessage());
@@ -2424,8 +2448,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
*/
public Partition createPartition(Table tbl, Map<String, String> partSpec) throws HiveException {
try {
- return new Partition(tbl, getMSC().add_partition(
- Partition.createMetaPartitionObject(tbl, partSpec, null)));
+ org.apache.hadoop.hive.metastore.api.Partition part =
+ Partition.createMetaPartitionObject(tbl, partSpec, null);
+ AcidUtils.TableSnapshot tableSnapshot =
+ AcidUtils.getTableSnapshot(conf, tbl);
+ part.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
+ part.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
+ return new Partition(tbl, getMSC().add_partition(part));
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
throw new HiveException(e);
@@ -2437,8 +2466,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
int size = addPartitionDesc.getPartitionCount();
List<org.apache.hadoop.hive.metastore.api.Partition> in =
new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
+ AcidUtils.TableSnapshot tableSnapshot =
+ AcidUtils.getTableSnapshot(conf, tbl);
for (int i = 0; i < size; ++i) {
- in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf));
+ org.apache.hadoop.hive.metastore.api.Partition tmpPart =
+ convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf);
+ if (tmpPart != null && tableSnapshot != null && tableSnapshot.getTxnId() > 0) {
+ tmpPart.setTxnId(tableSnapshot.getTxnId());
+ tmpPart.setValidWriteIdList(tableSnapshot.getValidWriteIdList());
+ }
+ in.add(tmpPart);
}
List<Partition> out = new ArrayList<Partition>();
try {
@@ -2633,7 +2670,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
fullName = tbl.getFullyQualifiedName();
}
- alterPartition(fullName, new Partition(tbl, tpart), null);
+ Partition newPart = new Partition(tbl, tpart);
+ alterPartition(fullName, newPart, null);
}
private void alterPartitionSpecInMemory(Table tbl,
@@ -4359,8 +4397,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
}
- public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException {
+ public boolean setPartitionColumnStatistics(
+ SetPartitionsStatsRequest request) throws HiveException {
try {
+ ColumnStatistics colStat = request.getColStats().get(0);
+ ColumnStatisticsDesc statsDesc = colStat.getStatsDesc();
+ Table tbl = getTable(statsDesc.getDbName(), statsDesc.getTableName());
+
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+ request.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
+ request.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
return getMSC().setPartitionColumnStatistics(request);
} catch (Exception e) {
LOG.debug(StringUtils.stringifyException(e));
@@ -4370,8 +4416,27 @@ private void constructOneLBLocationMap(FileStatus fSta,
public List<ColumnStatisticsObj> getTableColumnStatistics(
String dbName, String tableName, List<String> colNames) throws HiveException {
+ return getTableColumnStatistics(dbName, tableName, colNames, false);
+ }
+
+ public List<ColumnStatisticsObj> getTableColumnStatistics(
+ String dbName, String tableName, List<String> colNames, boolean checkTransactional)
+ throws HiveException {
+
+ List<ColumnStatisticsObj> retv = null;
try {
- return getMSC().getTableColumnStatistics(dbName, tableName, colNames);
+ if (checkTransactional) {
+ Table tbl = getTable(dbName, tableName);
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+ if (tableSnapshot != null && tableSnapshot.getTxnId() > 0) {
+ retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames,
+ tableSnapshot.getTxnId(), tableSnapshot.getValidWriteIdList());
+ }
+ } else {
+ retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames);
+ }
+ return retv;
} catch (Exception e) {
LOG.debug(StringUtils.stringifyException(e));
throw new HiveException(e);
@@ -4380,8 +4445,25 @@ private void constructOneLBLocationMap(FileStatus fSta,
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName,
String tableName, List<String> partNames, List<String> colNames) throws HiveException {
- try {
- return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames);
+ return getPartitionColumnStatistics(dbName, tableName, partNames, colNames, false);
+ }
+
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String dbName, String tableName, List<String> partNames, List<String> colNames,
+ boolean checkTransactional)
+ throws HiveException {
+ long txnId = -1;
+ String writeIdList = null;
+ try {
+ if (checkTransactional) {
+ Table tbl = getTable(dbName, tableName);
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+ txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
+ writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
+ }
+
+ return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames,
+ txnId, writeIdList);
} catch (Exception e) {
LOG.debug(StringUtils.stringifyException(e));
throw new HiveException(e);
@@ -4390,8 +4472,22 @@ private void constructOneLBLocationMap(FileStatus fSta,
public AggrStats getAggrColStatsFor(String dbName, String tblName,
List<String> colNames, List<String> partName) {
- try {
- return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName);
+ return getAggrColStatsFor(dbName, tblName, colNames, partName, false);
+ }
+
+ public AggrStats getAggrColStatsFor(String dbName, String tblName,
+ List<String> colNames, List<String> partName, boolean checkTransactional) {
+ long txnId = -1;
+ String writeIdList = null;
+ try {
+ if (checkTransactional) {
+ Table tbl = getTable(dbName, tblName);
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+ txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
+ writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
+ }
+ return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName,
+ txnId, writeIdList);
} catch (Exception e) {
LOG.debug(StringUtils.stringifyException(e));
return new AggrStats(new ArrayList<ColumnStatisticsObj>(),0);
@@ -5189,4 +5285,26 @@ private void constructOneLBLocationMap(FileStatus fSta,
throw new HiveException(e);
}
}
+
+ private void setTableSnapshotForTransactionalTable(
+ HiveConf conf, Table newTbl)
+ throws LockException {
+
+ org.apache.hadoop.hive.metastore.api.Table newTTbl = newTbl.getTTable();
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, newTbl);
+
+ newTTbl.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1);
+ newTTbl.setValidWriteIdList(
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
+ }
+
+ private void setTableSnapshotForTransactionalPartition(HiveConf conf, Partition partition)
+ throws LockException {
+
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, partition.getTable());
+ org.apache.hadoop.hive.metastore.api.Partition tpartition = partition.getTPartition();
+ tpartition.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1);
+ tpartition.setValidWriteIdList(
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
+ }
}
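
For reference, here is a minimal caller-side sketch (not part of the patch; the Hive handle, table names, and flag values are assumed) showing how the new transactional overloads in Hive.java are meant to compose:

    // Fetch the table while validating its stats against the current
    // transaction's snapshot (checkTransactional = true).
    Hive db = Hive.get(conf);
    Table tbl = db.getTable("default", "stats_part",
        true /* throwException */, true /* checkTransactional */);
    // Alter the table and persist a fresh snapshot (txn id plus valid
    // write-id list) together with the metadata (transactional = true).
    db.alterTable("default.stats_part", tbl, new EnvironmentContext(), true);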
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 857f300..5f8754a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -282,7 +283,17 @@ public class StatsOptimizer extends Transform {
// limit. In order to be safe, we do not use it now.
return null;
}
+
+ Hive hive = Hive.get(pctx.getConf());
Table tbl = tsOp.getConf().getTableMetadata();
+ boolean isTransactionalTable = AcidUtils.isTransactionalTable(tbl);
+
+ // If the table is transactional, re-fetch it via getTable() with the
+ // transactional flag on so the validity of its stats is checked.
+ if (isTransactionalTable) {
+ tbl = hive.getTable(tbl.getDbName(), tbl.getTableName(), true, true);
+ }
+
if (MetaStoreUtils.isExternalTable(tbl.getTTable())) {
Logger.info("Table " + tbl.getTableName() + " is external. Skip StatsOptimizer.");
return null;
@@ -375,7 +386,8 @@ public class StatsOptimizer extends Transform {
List<Object> oneRow = new ArrayList<Object>();
- Hive hive = Hive.get(pctx.getConf());
+ AcidUtils.TableSnapshot tableSnapshot =
+ AcidUtils.getTableSnapshot(pctx.getConf(), tbl);
for (AggregationDesc aggr : pgbyOp.getConf().getAggregators()) {
if (aggr.getDistinct()) {
@@ -462,8 +474,13 @@ public class StatsOptimizer extends Transform {
+ " are not up to date.");
return null;
}
- List<ColumnStatisticsObj> stats = hive.getMSC().getTableColumnStatistics(
- tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName));
+
+ List<ColumnStatisticsObj> stats =
+ hive.getMSC().getTableColumnStatistics(
+ tbl.getDbName(), tbl.getTableName(),
+ Lists.newArrayList(colName),
+ tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
if (stats.isEmpty()) {
Logger.debug("No stats for " + tbl.getTableName() + " column " + colName);
return null;
@@ -523,8 +540,13 @@ public class StatsOptimizer extends Transform {
+ " are not up to date.");
return null;
}
- List<ColumnStatisticsObj> stats = hive.getMSC().getTableColumnStatistics(
- tbl.getDbName(),tbl.getTableName(), Lists.newArrayList(colName));
+
+ List<ColumnStatisticsObj> stats =
+ hive.getMSC().getTableColumnStatistics(
+ tbl.getDbName(), tbl.getTableName(),
+ Lists.newArrayList(colName),
+ tableSnapshot.getTxnId(),
+ tableSnapshot.getValidWriteIdList());
if (stats.isEmpty()) {
Logger.debug("No stats for " + tbl.getTableName() + " column " + colName);
return null;
@@ -664,9 +686,12 @@ public class StatsOptimizer extends Transform {
+ " are not up to date.");
return null;
}
- ColumnStatisticsData statData = hive.getMSC().getTableColumnStatistics(
- tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName))
- .get(0).getStatsData();
+ ColumnStatisticsData statData =
+ hive.getMSC().getTableColumnStatistics(
+ tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName),
+ tableSnapshot.getTxnId(),
+ tableSnapshot.getValidWriteIdList())
+ .get(0).getStatsData();
String name = colDesc.getTypeString().toUpperCase();
switch (type) {
case Integer: {
@@ -887,7 +912,7 @@ public class StatsOptimizer extends Transform {
}
private Collection<List<ColumnStatisticsObj>> verifyAndGetPartColumnStats(
- Hive hive, Table tbl, String colName, Set<Partition> parts) throws TException {
+ Hive hive, Table tbl, String colName, Set<Partition> parts) throws TException, LockException {
List<String> partNames = new ArrayList<String>(parts.size());
for (Partition part : parts) {
if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(part.getTable(), part.getParameters(), colName)) {
@@ -897,8 +922,13 @@ public class StatsOptimizer extends Transform {
}
partNames.add(part.getName());
}
+ AcidUtils.TableSnapshot tableSnapshot =
+ AcidUtils.getTableSnapshot(hive.getConf(), tbl);
+
Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
- tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName));
+ tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName),
+ tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
if (result.size() != parts.size()) {
Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions");
return null;
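
The StatsOptimizer hunks above all apply the same guarded pattern; restated standalone for clarity (assuming a Hive handle "hive", a Table "tbl", and a hypothetical column "key"):

    // Resolve the table snapshot once, then pass its txn id and valid
    // write-id list to the stats fetch; -1/null signal "no snapshot".
    AcidUtils.TableSnapshot snapshot = AcidUtils.getTableSnapshot(hive.getConf(), tbl);
    List<ColumnStatisticsObj> stats = hive.getMSC().getTableColumnStatistics(
        tbl.getDbName(), tbl.getTableName(), Lists.newArrayList("key"),
        snapshot != null ? snapshot.getTxnId() : -1,
        snapshot != null ? snapshot.getValidWriteIdList() : null);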
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
index d4d46a3..9a271a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
@@ -344,12 +344,12 @@ public class BasicStatsNoJobTask implements IStatsProcessor {
}
if (values.get(0).result instanceof Table) {
- db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext);
+ db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext, true);
LOG.debug("Updated stats for {}.", tableFullName);
} else {
if (values.get(0).result instanceof Partition) {
List<Partition> results = Lists.transform(values, FooterStatCollector.EXTRACT_RESULT_FUNCTION);
- db.alterPartitions(tableFullName, results, environmentContext);
+ db.alterPartitions(tableFullName, results, environmentContext, true);
LOG.debug("Bulk updated {} partitions of {}.", results.size(), tableFullName);
} else {
throw new RuntimeException("inconsistent");
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
index 8c23887..0a2992d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
@@ -127,10 +127,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
public Object process(StatsAggregator statsAggregator) throws HiveException, MetaException {
Partish p = partish;
Map<String, String> parameters = p.getPartParameters();
- if (p.isTransactionalTable()) {
- // TODO: this should also happen on any error. Right now this task will just fail.
- StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
- } else if (work.isTargetRewritten()) {
+ if (work.isTargetRewritten()) {
StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
}
@@ -208,12 +205,6 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
private void updateStats(StatsAggregator statsAggregator, Map<String, String> parameters,
String aggKey, boolean isFullAcid) throws HiveException {
for (String statType : StatsSetupConst.statsRequireCompute) {
- if (isFullAcid && !work.isTargetRewritten()) {
- // Don't bother with aggregation in this case, it will probably be invalid.
- parameters.remove(statType);
- continue;
- }
-
String value = statsAggregator.aggregateStats(aggKey, statType);
if (value != null && !value.isEmpty()) {
long longValue = Long.parseLong(value);
@@ -272,7 +263,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
if (res == null) {
return 0;
}
- db.alterTable(tableFullName, res, environmentContext);
+ db.alterTable(tableFullName, res, environmentContext, true);
if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
console.printInfo("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']');
@@ -340,7 +331,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
}
if (!updates.isEmpty()) {
- db.alterPartitions(tableFullName, updates, environmentContext);
+ db.alterPartitions(tableFullName, updates, environmentContext, true);
}
if (work.isStatsReliable() && updates.size() != processors.size()) {
LOG.info("Stats should be reliadble...however seems like there were some issue.. => ret 1");
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
index d4cfd0a..acebf52 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
@@ -34,12 +34,14 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.FetchOperator;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
@@ -176,6 +178,11 @@ public class ColStatsProcessor implements IStatsProcessor {
}
SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
request.setNeedMerge(colStatDesc.isNeedMerge());
+ if (AcidUtils.isTransactionalTable(tbl) && SessionState.get().getTxnMgr() != null) {
+ request.setTxnId(SessionState.get().getTxnMgr().getCurrentTxnId());
+ request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf,
+ AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString());
+ }
db.setPartitionColumnStatistics(request);
return 0;
}
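
A condensed sketch of the request-side wiring that ColStatsProcessor now performs (assuming "conf", "tbl", a collected "colStats" list, and a Hive handle "db"):

    SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
    // Only transactional tables inside an open transaction carry snapshot info.
    if (AcidUtils.isTransactionalTable(tbl) && SessionState.get().getTxnMgr() != null) {
      request.setTxnId(SessionState.get().getTxnMgr().getCurrentTxnId());
      request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf,
          AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString());
    }
    db.setPartitionColumnStatistics(request);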
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/test/queries/clientpositive/stats_nonpart.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_nonpart.q b/ql/src/test/queries/clientpositive/stats_nonpart.q
new file mode 100644
index 0000000..b1a4876
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_nonpart.q
@@ -0,0 +1,53 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
+drop table if exists mysource;
+create table mysource (p int,key int);
+insert into mysource values (100,20), (101,40), (102,50);
+insert into mysource values (100,30), (101,50), (102,60);
+
+-- test nonpartitioned table
+drop table if exists stats_nonpartitioned;
+
+create table stats_nonpartitioned(key int, value int) stored as orc;
+--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true");
+--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+desc formatted stats_nonpartitioned;
+
+explain insert into table stats_nonpartitioned select * from mysource where p == 100;
+insert into table stats_nonpartitioned select * from mysource where p == 100;
+
+desc formatted stats_nonpartitioned;
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+explain select count(key) from stats_nonpartitioned;
+select count(key) from stats_nonpartitioned;
+
+--analyze table stats_nonpartitioned compute statistics;
+analyze table stats_nonpartitioned compute statistics for columns key, value;
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+explain select count(key) from stats_nonpartitioned;
+select count(key) from stats_nonpartitioned;
+
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/test/queries/clientpositive/stats_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_part.q b/ql/src/test/queries/clientpositive/stats_part.q
new file mode 100644
index 0000000..29f8a15
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_part.q
@@ -0,0 +1,98 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
+drop table if exists mysource;
+create table mysource (p int, key int, value int);
+insert into mysource values (100,20,201), (101,40,401), (102,50,501);
+insert into mysource values (100,21,211), (101,41,411), (102,51,511);
+
+--explain select count(*) from mysource;
+--select count(*) from mysource;
+
+-- Gather col stats manually
+--analyze table mysource compute statistics for columns p, key;
+
+--explain select count(*) from mysource;
+--select count(*) from mysource;
+--explain select count(key) from mysource;
+--select count(key) from mysource;
+
+-- test partitioned table
+drop table if exists stats_partitioned;
+
+create table stats_part(key int,value string) partitioned by (p int) stored as orc;
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true");
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+explain select count(key) from stats_part;
+--select count(*) from stats_part;
+--explain select count(*) from stats_part where p = 100;
+--select count(*) from stats_part where p = 100;
+explain select count(key) from stats_part where p > 100;
+--select count(*) from stats_part where p > 100;
+desc formatted stats_part;
+
+--explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101;
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+
+insert into table mysource values (103,20,200), (103,83,832), (103,53,530);
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+show partitions stats_part;
+
+explain select count(*) from stats_part;
+select count(*) from stats_part;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(key) from stats_part where p > 100;
+select count(key) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+--update stats_part set key = key + 100 where key in(-50,40) and p > 100;
+desc formatted stats_part;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+select count(value) from stats_part;
+--update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100;
+select count(value) from stats_part;
+
+--delete from stats_part where key in (20, 41);
+desc formatted stats_part;
+
+explain select count(*) from stats_part where p = 100;
+select count(*) from stats_part where p = 100;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+describe extended stats_part partition (p=101);
+describe extended stats_part;
+
+
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/test/queries/clientpositive/stats_part2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_part2.q b/ql/src/test/queries/clientpositive/stats_part2.q
new file mode 100644
index 0000000..24be218
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_part2.q
@@ -0,0 +1,100 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
+drop table if exists mysource;
+create table mysource (p int, key int, value string);
+insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50');
+insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51');
+
+-- test partitioned table
+drop table if exists stats_partitioned;
+
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc;
+create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true");
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+--explain select count(*) from stats_part;
+--select count(*) from stats_part;
+--explain select count(*) from stats_part where p = 100;
+--select count(*) from stats_part where p = 100;
+explain select count(*) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+--select count(*) from stats_part where p > 100;
+desc formatted stats_part;
+
+--explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101;
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+explain select count(key) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+
+insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53');
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+show partitions stats_part;
+
+explain select count(*) from stats_part;
+select count(*) from stats_part;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(key) from stats_part where p > 100;
+select count(key) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+desc formatted stats_part partition(p = 100);
+desc formatted stats_part partition(p = 101);
+desc formatted stats_part partition(p = 102);
+update stats_part set key = key + 100 where key in(-50,40) and p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+desc formatted stats_part partition(p = 100);
+desc formatted stats_part partition(p = 101);
+desc formatted stats_part partition(p = 102);
+
+select count(value) from stats_part;
+update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100;
+desc formatted stats_part partition(p = 100);
+desc formatted stats_part partition(p = 101);
+desc formatted stats_part partition(p = 102);
+select count(value) from stats_part;
+
+delete from stats_part where key in (20, 41);
+desc formatted stats_part partition(p = 100);
+desc formatted stats_part partition(p = 101);
+desc formatted stats_part partition(p = 102);
+
+explain select count(*) from stats_part where p = 100;
+select count(*) from stats_part where p = 100;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+describe extended stats_part partition (p=101);
+describe extended stats_part;
+
+
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/test/queries/clientpositive/stats_sizebug.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_sizebug.q b/ql/src/test/queries/clientpositive/stats_sizebug.q
new file mode 100644
index 0000000..6923fe0
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_sizebug.q
@@ -0,0 +1,37 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
+drop table if exists mysource;
+create table mysource (p int,key int);
+insert into mysource values (100,20), (101,40), (102,50);
+insert into mysource values (100,20), (101,40), (102,50);
+
+-- test nonpartitioned table
+drop table if exists stats_nonpartitioned;
+
+create table stats_nonpartitioned(key int, value int) stored as orc;
+--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true");
+--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+explain insert into table stats_nonpartitioned select * from mysource where p == 100;
+insert into table stats_nonpartitioned select * from mysource where p == 100;
+
+desc formatted stats_nonpartitioned;
+analyze table mysource compute statistics for columns p, key;
+desc formatted stats_nonpartitioned;
+
+
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/ql/src/test/results/clientpositive/stats_nonpart.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_nonpart.q.out b/ql/src/test/results/clientpositive/stats_nonpart.q.out
new file mode 100644
index 0000000..0449707
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_nonpart.q.out
@@ -0,0 +1,325 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int,key int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int,key int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: insert into mysource values (100,30), (101,50), (102,60)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,30), (101,50), (102,60)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: drop table if exists stats_nonpartitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_nonpartitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_nonpartitioned
+PREHOOK: query: explain select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+0
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type comment
+key int
+value int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ bucketing_version 2
+ numFiles 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: mysource
+ Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (p = 100) (type: boolean)
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 100 (type: int), key (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.stats_nonpartitioned
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int)
+ outputColumnNames: key, value
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.stats_nonpartitioned
+
+ Stage: Stage-2
+ Stats Work
+ Basic Stats Work:
+ Column Stats Desc:
+ Columns: key, value
+ Column Types: int, int
+ Table: default.stats_nonpartitioned
+
+PREHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: Lineage: stats_nonpartitioned.key SIMPLE []
+POSTHOOK: Lineage: stats_nonpartitioned.value SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type comment
+key int
+value int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ bucketing_version 2
+ numFiles 1
+ numRows 2
+ rawDataSize 16
+ totalSize 280
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+2
+PREHOOK: query: explain select count(key) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+2
+PREHOOK: query: analyze table stats_nonpartitioned compute statistics for columns key, value
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@stats_nonpartitioned
+PREHOOK: Output: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table stats_nonpartitioned compute statistics for columns key, value
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: Output: default@stats_nonpartitioned
+#### A masked pattern was here ####
+PREHOOK: query: explain select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+2
+PREHOOK: query: explain select count(key) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+2
[06/13] hive git commit: HIVE-19532: 03 patch
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
index 3c88d8f..821049e 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
@@ -42,6 +42,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -53,13 +55,17 @@ import org.slf4j.LoggerFactory;
private String tblName; // required
private ClientCapabilities capabilities; // optional
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
DB_NAME((short)1, "dbName"),
TBL_NAME((short)2, "tblName"),
CAPABILITIES((short)3, "capabilities"),
- CAT_NAME((short)4, "catName");
+ CAT_NAME((short)4, "catName"),
+ TXN_ID((short)5, "txnId"),
+ VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -82,6 +88,10 @@ import org.slf4j.LoggerFactory;
return CAPABILITIES;
case 4: // CAT_NAME
return CAT_NAME;
+ case 5: // TXN_ID
+ return TXN_ID;
+ case 6: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -122,7 +132,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME};
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -134,11 +146,17 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class)));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap);
}
public GetTableRequest() {
+ this.txnId = -1L;
+
}
public GetTableRequest(
@@ -154,6 +172,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public GetTableRequest(GetTableRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbName()) {
this.dbName = other.dbName;
}
@@ -166,6 +185,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public GetTableRequest deepCopy() {
@@ -178,6 +201,9 @@ import org.slf4j.LoggerFactory;
this.tblName = null;
this.capabilities = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public String getDbName() {
@@ -272,6 +298,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -306,6 +377,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -323,6 +410,12 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -342,6 +435,10 @@ import org.slf4j.LoggerFactory;
return isSetCapabilities();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -395,6 +492,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -422,6 +537,16 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -473,6 +598,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -528,6 +673,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -558,6 +719,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -615,6 +778,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 5: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 6: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -652,6 +831,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -678,13 +869,25 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetTxnId()) {
+ optionals.set(2);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(3);
+ }
+ oprot.writeBitSet(optionals, 4);
if (struct.isSetCapabilities()) {
struct.capabilities.write(oprot);
}
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -694,7 +897,7 @@ import org.slf4j.LoggerFactory;
struct.setDbNameIsSet(true);
struct.tblName = iprot.readString();
struct.setTblNameIsSet(true);
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
struct.capabilities = new ClientCapabilities();
struct.capabilities.read(iprot);
@@ -704,6 +907,14 @@ import org.slf4j.LoggerFactory;
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(2)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(3)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
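
To make the generated plumbing above concrete, here is a minimal sketch (not part of the patch) of populating the two new optional fields on GetTableRequest. The -1 txnId sentinel and the isset-bitfield behavior come straight from the diff; the dbName/tblName values and the ValidWriteIdList string are purely illustrative.

    import org.apache.hadoop.hive.metastore.api.GetTableRequest;

    public class GetTableRequestSketch {
      public static void main(String[] args) {
        GetTableRequest req = new GetTableRequest("default", "stats_nonpartitioned");

        // Freshly built: txnId holds the -1 default but the isset bit is clear,
        // so the field will not be serialized.
        System.out.println(req.getTxnId());              // -1
        System.out.println(req.isSetTxnId());            // false

        // Setting the fields flips the isset bits and they go on the wire.
        req.setTxnId(10L);                                               // hypothetical txn id
        req.setValidWriteIdList("default.stats_nonpartitioned:10:10::"); // illustrative format
        System.out.println(req.isSetTxnId());            // true
        System.out.println(req.isSetValidWriteIdList()); // true
      }
    }
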
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
index 968e250..80aff92 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableResult");
private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -47,10 +48,16 @@ import org.slf4j.LoggerFactory;
}
private Table table; // required
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- TABLE((short)1, "table");
+ TABLE((short)1, "table"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -67,6 +74,8 @@ import org.slf4j.LoggerFactory;
switch(fieldId) {
case 1: // TABLE
return TABLE;
+ case 2: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -107,11 +116,14 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableResult.class, metaDataMap);
}
@@ -133,6 +145,9 @@ import org.slf4j.LoggerFactory;
if (other.isSetTable()) {
this.table = new Table(other.table);
}
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public GetTableResult deepCopy() {
@@ -142,6 +157,7 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
this.table = null;
+ this.isStatsCompliant = null;
}
public Table getTable() {
@@ -167,6 +183,37 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case TABLE:
@@ -177,6 +224,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -185,6 +240,9 @@ import org.slf4j.LoggerFactory;
case TABLE:
return getTable();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -198,6 +256,8 @@ import org.slf4j.LoggerFactory;
switch (field) {
case TABLE:
return isSetTable();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -224,6 +284,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -236,6 +305,11 @@ import org.slf4j.LoggerFactory;
if (present_table)
list.add(table);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -257,6 +331,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -284,6 +368,16 @@ import org.slf4j.LoggerFactory;
sb.append(this.table);
}
first = false;
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -343,6 +437,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -361,6 +463,13 @@ import org.slf4j.LoggerFactory;
struct.table.write(oprot);
oprot.writeFieldEnd();
}
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -379,6 +488,14 @@ import org.slf4j.LoggerFactory;
public void write(org.apache.thrift.protocol.TProtocol prot, GetTableResult struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
struct.table.write(oprot);
+ BitSet optionals = new BitSet();
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -387,6 +504,11 @@ import org.slf4j.LoggerFactory;
struct.table = new Table();
struct.table.read(iprot);
struct.setTableIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
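
On the result side, a short sketch (again not from the patch) of how a reader might consume the new optional isStatsCompliant flag; the empty Table is only a stand-in for whatever the server returned.

    import org.apache.hadoop.hive.metastore.api.GetTableResult;
    import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class GetTableResultSketch {
      public static void main(String[] args) {
        GetTableResult result = new GetTableResult(new Table()); // stand-in table
        // Optional field: unset means the server made no compliance claim at all.
        if (result.isSetIsStatsCompliant()
            && result.getIsStatsCompliant() == IsolationLevelCompliance.YES) {
          System.out.println("stats are valid for this reader's snapshot");
        } else {
          System.out.println("treat the table's stats as potentially stale");
        }
      }
    }
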
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java
new file mode 100644
index 0000000..cb2559f
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum IsolationLevelCompliance implements org.apache.thrift.TEnum {
+ YES(1),
+ NO(2),
+ UNKNOWN(3);
+
+ private final int value;
+
+ private IsolationLevelCompliance(int value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the integer value of this enum value, as defined in the Thrift IDL.
+ */
+ public int getValue() {
+ return value;
+ }
+
+ /**
+ * Find the enum type by its integer value, as defined in the Thrift IDL.
+ * @return null if the value is not found.
+ */
+ public static IsolationLevelCompliance findByValue(int value) {
+ switch (value) {
+ case 1:
+ return YES;
+ case 2:
+ return NO;
+ case 3:
+ return UNKNOWN;
+ default:
+ return null;
+ }
+ }
+}
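
One detail of the new enum worth noting: findByValue returns null for wire values it does not recognize instead of throwing, so readers built against an older IDL must null-check. A tiny demonstration (not part of the patch):

    import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;

    public class IsolationLevelComplianceSketch {
      public static void main(String[] args) {
        // Every declared constant round-trips through its IDL integer.
        for (IsolationLevelCompliance c : IsolationLevelCompliance.values()) {
          System.out.println(c + " <-> " + IsolationLevelCompliance.findByValue(c.getValue()));
        }
        // Unrecognized values come back as null rather than an exception.
        System.out.println(IsolationLevelCompliance.findByValue(99)); // null
      }
    }
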
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
index 51f809a..5b40d2f 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
@@ -47,6 +47,9 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)7);
private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)10);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)11);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)12);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -63,6 +66,9 @@ import org.slf4j.LoggerFactory;
private Map<String,String> parameters; // required
private PrincipalPrivilegeSet privileges; // optional
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -74,7 +80,14 @@ import org.slf4j.LoggerFactory;
SD((short)6, "sd"),
PARAMETERS((short)7, "parameters"),
PRIVILEGES((short)8, "privileges"),
- CAT_NAME((short)9, "catName");
+ CAT_NAME((short)9, "catName"),
+ TXN_ID((short)10, "txnId"),
+ VALID_WRITE_ID_LIST((short)11, "validWriteIdList"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)12, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -107,6 +120,12 @@ import org.slf4j.LoggerFactory;
return PRIVILEGES;
case 9: // CAT_NAME
return CAT_NAME;
+ case 10: // TXN_ID
+ return TXN_ID;
+ case 11: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ case 12: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -149,8 +168,9 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __CREATETIME_ISSET_ID = 0;
private static final int __LASTACCESSTIME_ISSET_ID = 1;
+ private static final int __TXNID_ISSET_ID = 2;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME};
+ private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -175,11 +195,19 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap);
}
public Partition() {
+ this.txnId = -1L;
+
}
public Partition(
@@ -233,6 +261,13 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.catName);
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public Partition deepCopy() {
@@ -252,6 +287,10 @@ import org.slf4j.LoggerFactory;
this.parameters = null;
this.privileges = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ this.isStatsCompliant = null;
}
public int getValuesSize() {
@@ -485,6 +524,82 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case VALUES:
@@ -559,6 +674,30 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -591,6 +730,15 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -620,6 +768,12 @@ import org.slf4j.LoggerFactory;
return isSetPrivileges();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -718,6 +872,33 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -770,6 +951,21 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -871,6 +1067,36 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -958,6 +1184,32 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1105,6 +1357,30 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 10: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 11: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 12: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -1178,6 +1454,25 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -1223,7 +1518,16 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(8);
}
- oprot.writeBitSet(optionals, 9);
+ if (struct.isSetTxnId()) {
+ optionals.set(9);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(10);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(11);
+ }
+ oprot.writeBitSet(optionals, 12);
if (struct.isSetValues()) {
{
oprot.writeI32(struct.values.size());
@@ -1264,12 +1568,21 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(9);
+ BitSet incoming = iprot.readBitSet(12);
if (incoming.get(0)) {
{
org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
@@ -1328,6 +1641,18 @@ import org.slf4j.LoggerFactory;
struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
struct.setCatNameIsSet(true);
}
+ if (incoming.get(9)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(10)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ if (incoming.get(11)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
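
Partition gains the same trio of optional fields, with txnId sharing the existing __isset_bitfield (bit 2, after createTime and lastAccessTime). A minimal sketch (not from the patch; the constructor arguments are illustrative required-field values) showing that unsetTxnId only clears the bit while the primitive keeps its last value:

    import java.util.ArrayList;
    import java.util.HashMap;

    import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

    public class PartitionTxnFieldsSketch {
      public static void main(String[] args) {
        Partition p = new Partition(new ArrayList<>(), "default", "stats_part",
            0, 0, new StorageDescriptor(), new HashMap<>());
        p.setTxnId(7L);                                    // hypothetical txn id
        p.setValidWriteIdList("default.stats_part:7:7::"); // illustrative format
        p.setIsStatsCompliant(IsolationLevelCompliance.UNKNOWN);

        // unsetTxnId clears only the isset bit; the long field still holds 7.
        p.unsetTxnId();
        System.out.println(p.isSetTxnId()); // false
        System.out.println(p.getTxnId());   // 7
      }
    }
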
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
index 247fdaa..bc625b0 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
@@ -44,6 +44,9 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField SHARED_SDPARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sharedSDPartitionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)4);
private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)9);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -57,6 +60,9 @@ import org.slf4j.LoggerFactory;
private PartitionSpecWithSharedSD sharedSDPartitionSpec; // optional
private PartitionListComposingSpec partitionList; // optional
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -65,7 +71,14 @@ import org.slf4j.LoggerFactory;
ROOT_PATH((short)3, "rootPath"),
SHARED_SDPARTITION_SPEC((short)4, "sharedSDPartitionSpec"),
PARTITION_LIST((short)5, "partitionList"),
- CAT_NAME((short)6, "catName");
+ CAT_NAME((short)6, "catName"),
+ TXN_ID((short)7, "txnId"),
+ VALID_WRITE_ID_LIST((short)8, "validWriteIdList"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)9, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -92,6 +105,12 @@ import org.slf4j.LoggerFactory;
return PARTITION_LIST;
case 6: // CAT_NAME
return CAT_NAME;
+ case 7: // TXN_ID
+ return TXN_ID;
+ case 8: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ case 9: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -132,7 +151,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME};
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -148,11 +169,19 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionListComposingSpec.class)));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpec.class, metaDataMap);
}
public PartitionSpec() {
+ this.txnId = -1L;
+
}
public PartitionSpec(
@@ -170,6 +199,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public PartitionSpec(PartitionSpec other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbName()) {
this.dbName = other.dbName;
}
@@ -188,6 +218,13 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public PartitionSpec deepCopy() {
@@ -202,6 +239,10 @@ import org.slf4j.LoggerFactory;
this.sharedSDPartitionSpec = null;
this.partitionList = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ this.isStatsCompliant = null;
}
public String getDbName() {
@@ -342,6 +383,82 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -392,6 +509,30 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -415,6 +556,15 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -438,6 +588,12 @@ import org.slf4j.LoggerFactory;
return isSetPartitionList();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -509,6 +665,33 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -546,6 +729,21 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -617,6 +815,36 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -690,6 +918,32 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -715,6 +969,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -789,6 +1045,30 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 7: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 8: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 9: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -838,6 +1118,25 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -874,7 +1173,16 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(5);
}
- oprot.writeBitSet(optionals, 6);
+ if (struct.isSetTxnId()) {
+ optionals.set(6);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(7);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(8);
+ }
+ oprot.writeBitSet(optionals, 9);
if (struct.isSetDbName()) {
oprot.writeString(struct.dbName);
}
@@ -893,12 +1201,21 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(6);
+ BitSet incoming = iprot.readBitSet(9);
if (incoming.get(0)) {
struct.dbName = iprot.readString();
struct.setDbNameIsSet(true);
@@ -925,6 +1242,18 @@ import org.slf4j.LoggerFactory;
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(6)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(7)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ if (incoming.get(8)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
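
Because equals, hashCode, and compareTo were all regenerated to fold in the new fields, two PartitionSpec instances that agree on everything else now diverge as soon as one carries a validWriteIdList. A small check (not part of the patch; the list string is illustrative):

    import org.apache.hadoop.hive.metastore.api.PartitionSpec;

    public class PartitionSpecEqualitySketch {
      public static void main(String[] args) {
        PartitionSpec a = new PartitionSpec();
        PartitionSpec b = new PartitionSpec();
        System.out.println(a.equals(b));    // true: all optional fields unset

        b.setValidWriteIdList("default.t:3:3::"); // illustrative write-id list
        System.out.println(a.equals(b));          // false: new field breaks equality
        System.out.println(a.compareTo(b) != 0);  // true: ordering sees it too
      }
    }
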
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
index 91cf567..a298b89 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
@@ -43,6 +43,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -55,6 +57,8 @@ import org.slf4j.LoggerFactory;
private List<String> colNames; // required
private List<String> partNames; // required
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -62,7 +66,9 @@ import org.slf4j.LoggerFactory;
TBL_NAME((short)2, "tblName"),
COL_NAMES((short)3, "colNames"),
PART_NAMES((short)4, "partNames"),
- CAT_NAME((short)5, "catName");
+ CAT_NAME((short)5, "catName"),
+ TXN_ID((short)6, "txnId"),
+ VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -87,6 +93,10 @@ import org.slf4j.LoggerFactory;
return PART_NAMES;
case 5: // CAT_NAME
return CAT_NAME;
+ case 6: // TXN_ID
+ return TXN_ID;
+ case 7: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -127,7 +137,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.CAT_NAME};
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -143,11 +155,17 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap);
}
public PartitionsStatsRequest() {
+ this.txnId = -1L;
+
}
public PartitionsStatsRequest(
@@ -167,6 +185,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public PartitionsStatsRequest(PartitionsStatsRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbName()) {
this.dbName = other.dbName;
}
@@ -184,6 +203,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public PartitionsStatsRequest deepCopy() {
@@ -197,6 +220,9 @@ import org.slf4j.LoggerFactory;
this.colNames = null;
this.partNames = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public String getDbName() {
@@ -344,6 +370,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -386,6 +457,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -406,6 +493,12 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -427,6 +520,10 @@ import org.slf4j.LoggerFactory;
return isSetPartNames();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -489,6 +586,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -521,6 +636,16 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -582,6 +707,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -643,6 +788,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -678,6 +839,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // Java serialization bypasses the default constructor, so the primitive-field isset bitfield must be reset explicitly before deserializing.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -762,6 +925,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 6: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 7: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -816,6 +995,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -853,10 +1044,22 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(0);
}
- oprot.writeBitSet(optionals, 1);
+ if (struct.isSetTxnId()) {
+ optionals.set(1);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -888,11 +1091,19 @@ import org.slf4j.LoggerFactory;
}
}
struct.setPartNamesIsSet(true);
- BitSet incoming = iprot.readBitSet(1);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(1)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
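Taken together, the two new optional fields let a caller pin a partition-stats request to its transactional snapshot. A hypothetical usage sketch, assuming the usual generated setters for the pre-existing fields; the write-id list string is illustrative only and would in practice be produced by the caller's ValidWriteIdList implementation:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;

class StatsRequestSketch {
  PartitionsStatsRequest build() {
    PartitionsStatsRequest req = new PartitionsStatsRequest();
    req.setDbName("default");                          // pre-existing required fields
    req.setTblName("acid_tbl");
    req.setColNames(Arrays.asList("id", "name"));
    req.setPartNames(Arrays.asList("ds=2018-06-15"));
    req.setTxnId(42L);                                 // new optional: caller's txn
    req.setValidWriteIdList("default.acid_tbl:5:9223372036854775807::");
    return req;
  }
}

Old clients that never call setTxnId/setValidWriteIdList leave both bits unset, so the server can distinguish a non-transactional caller from one that passed an empty snapshot.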
http://git-wip-us.apache.org/repos/asf/hive/blob/be303958/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
index 4caec8f..2414399 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult");
private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -47,10 +48,16 @@ import org.slf4j.LoggerFactory;
}
private Map<String,List<ColumnStatisticsObj>> partStats; // required
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- PART_STATS((short)1, "partStats");
+ PART_STATS((short)1, "partStats"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -67,6 +74,8 @@ import org.slf4j.LoggerFactory;
switch(fieldId) {
case 1: // PART_STATS
return PART_STATS;
+ case 2: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -107,6 +116,7 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -115,6 +125,8 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsResult.class, metaDataMap);
}
@@ -151,6 +163,9 @@ import org.slf4j.LoggerFactory;
}
this.partStats = __this__partStats;
}
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public PartitionsStatsResult deepCopy() {
@@ -160,6 +175,7 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
this.partStats = null;
+ this.isStatsCompliant = null;
}
public int getPartStatsSize() {
@@ -196,6 +212,37 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case PART_STATS:
@@ -206,6 +253,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -214,6 +269,9 @@ import org.slf4j.LoggerFactory;
case PART_STATS:
return getPartStats();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -227,6 +285,8 @@ import org.slf4j.LoggerFactory;
switch (field) {
case PART_STATS:
return isSetPartStats();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -253,6 +313,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -265,6 +334,11 @@ import org.slf4j.LoggerFactory;
if (present_partStats)
list.add(partStats);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -286,6 +360,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -313,6 +397,16 @@ import org.slf4j.LoggerFactory;
sb.append(this.partStats);
}
first = false;
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -391,6 +485,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -424,6 +526,13 @@ import org.slf4j.LoggerFactory;
}
oprot.writeFieldEnd();
}
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -455,6 +564,14 @@ import org.slf4j.LoggerFactory;
}
}
}
+ BitSet optionals = new BitSet();
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -483,6 +600,11 @@ import org.slf4j.LoggerFactory;
}
}
struct.setPartStatsIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
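On the result side, isStatsCompliant tells the client whether the returned statistics are valid under its snapshot; since the field is optional, a response from an older server simply leaves it unset, and that case must be handled explicitly. A sketch of a client-side check, using only the accessors generated above:

import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsResult;

class StatsComplianceSketch {
  void check(PartitionsStatsResult result) {
    if (!result.isSetIsStatsCompliant()) {
      // pre-upgrade server: compliance is unknown, caller decides the fallback
      return;
    }
    IsolationLevelCompliance c = result.getIsStatsCompliant();
    System.out.println("stats compliance: " + c + " (wire value " + c.getValue() + ")");
  }
}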