Posted to commits@hive.apache.org by kr...@apache.org on 2023/01/06 06:09:16 UTC
[hive] branch master updated: HIVE-26719: Ability to set number of buckets manually (Laszlo Vegh, reviewed by Krisztian Kasa)
This is an automated email from the ASF dual-hosted git repository.
krisztiankasa pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new f5b96a85bba HIVE-26719: Ability to set number of buckets manually (Laszlo Vegh, reviewed by Krisztian Kasa)
f5b96a85bba is described below
commit f5b96a85bba9b07b4b05ecc3f0f3736b44c407be
Author: veghlaci05 <lv...@cloudera.com>
AuthorDate: Fri Jan 6 07:09:04 2023 +0100
HIVE-26719: Ability to set number of buckets manually (Laszlo Vegh, reviewed by Krisztian Kasa)
---
.../org/apache/hadoop/hive/common/FileUtils.java | 5 -
.../ql/txn/compactor/TestCrudCompactorOnTez.java | 136 ++++++++++++++++++---
.../upgrade/hive/hive-schema-4.0.0.hive.sql | 15 ++-
.../hadoop/hive/ql/parse/AlterClauseParser.g | 4 +-
.../org/apache/hadoop/hive/ql/parse/HiveParser.g | 8 ++
.../storage/compact/AlterTableCompactAnalyzer.java | 11 +-
.../storage/compact/AlterTableCompactDesc.java | 10 +-
.../compact/AlterTableCompactOperation.java | 3 +
.../concatenate/AlterTableConcatenateAnalyzer.java | 2 +-
.../org/apache/hadoop/hive/ql/exec/Utilities.java | 5 -
.../hadoop/hive/ql/txn/compactor/MRCompactor.java | 82 +++++++------
.../ql/txn/compactor/RebalanceQueryCompactor.java | 11 +-
.../hadoop/hive/ql/txn/compactor/Worker.java | 7 ++
.../clientpositive/llap/dbtxnmgr_compact1.q.out | 1 +
.../clientpositive/llap/dbtxnmgr_compact2.q.out | 1 +
.../test/results/clientpositive/llap/sysdb.q.out | 7 +-
.../tez/acid_vectorization_original_tez.q.out | 1 +
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 44 +++++++
.../src/gen/thrift/gen-cpp/hive_metastore_types.h | 26 +++-
.../hive/metastore/api/CompactionInfoStruct.java | 106 +++++++++++++++-
.../hive/metastore/api/CompactionRequest.java | 110 ++++++++++++++++-
.../gen-php/metastore/CompactionInfoStruct.php | 24 ++++
.../thrift/gen-php/metastore/CompactionRequest.php | 24 ++++
.../src/gen/thrift/gen-py/hive_metastore/ttypes.py | 28 ++++-
.../src/gen/thrift/gen-rb/hive_metastore_types.rb | 8 +-
.../src/main/thrift/hive_metastore.thrift | 2 +
.../hadoop/hive/metastore/txn/CompactionInfo.java | 18 ++-
.../hive/metastore/txn/CompactionTxnHandler.java | 23 ++--
.../hadoop/hive/metastore/txn/TxnHandler.java | 6 +
.../src/main/sql/derby/hive-schema-4.0.0.derby.sql | 6 +-
.../derby/upgrade-4.0.0-alpha-2-to-4.0.0.derby.sql | 4 +
.../src/main/sql/mssql/hive-schema-4.0.0.mssql.sql | 2 +
.../mssql/upgrade-4.0.0-alpha-2-to-4.0.0.mssql.sql | 4 +
.../src/main/sql/mysql/hive-schema-4.0.0.mysql.sql | 6 +-
.../mysql/upgrade-4.0.0-alpha-2-to-4.0.0.mysql.sql | 4 +
.../main/sql/oracle/hive-schema-4.0.0.oracle.sql | 6 +-
.../upgrade-4.0.0-alpha-2-to-4.0.0.oracle.sql | 4 +
.../sql/postgres/hive-schema-4.0.0.postgres.sql | 6 +-
.../upgrade-4.0.0-alpha-2-to-4.0.0.postgres.sql | 4 +
.../upgrade-3.1.3000-to-4.0.0.postgres.sql | 4 +
40 files changed, 662 insertions(+), 116 deletions(-)
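For readers skimming the diff: the user-facing change is that a rebalance compaction can now be given an explicit bucket count with ALTER TABLE ... COMPACT 'rebalance' CLUSTERED INTO n BUCKETS. The count travels from the parser through AlterTableCompactDesc into the CompactionRequest and CompactionInfoStruct thrift structs (and the new CQ_NUMBER_OF_BUCKETS / CC_NUMBER_OF_BUCKETS metastore columns), and RebalanceQueryCompactor uses it instead of deriving a count from the existing bucket files. A minimal usage sketch, modeled on the new test further down (the table name is illustrative):

    // Assumes a full ACID (transactional) table; the helpers are the test
    // utilities used in TestCrudCompactorOnTez below.
    executeStatementOnDriver(
        "ALTER TABLE rebalance_test COMPACT 'rebalance' CLUSTERED INTO 4 BUCKETS", driver);
    runWorker(conf); // the Worker dequeues the request and runs RebalanceQueryCompactor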
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 9e4c083414c..402d20be745 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -41,14 +41,9 @@ import java.util.NoSuchElementException;
import java.util.Random;
import java.util.Set;
import java.util.Map;
-import java.util.Spliterator;
-import java.util.Spliterators;
-import java.util.stream.Stream;
-import java.util.stream.StreamSupport;
import java.util.StringTokenizer;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Streams;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.ContentSummary;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
index 962d714ef76..07971abac7f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
@@ -63,7 +63,6 @@ import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.BucketCodec;
import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hive.streaming.HiveStreamingConnection;
import org.apache.hive.streaming.StreamingConnection;
@@ -89,22 +88,6 @@ import static org.mockito.Mockito.*;
@SuppressWarnings("deprecation")
public class TestCrudCompactorOnTez extends CompactorOnTezTest {
- @Test
- public void testParquetRead() throws Exception {
- conf.set("tez.grouping.min-size", "10000000");
- conf.set("tez.grouping.max-size", "80000000");
- conf.set("hive.vectorized.execution.enabled", "false");
- driver = new Driver(conf);
-
- String dbName = "default";
- String tblName = "parq_test";
- executeStatementOnDriver("drop table if exists " + tblName, driver);
- executeStatementOnDriver("create transactional table " + tblName + " (a int, b int) stored as PARQUET", driver);
- executeStatementOnDriver("insert into " + tblName + " values(1,2),(1,3),(1,4),(2,2),(2,3),(2,4)", driver);
- executeStatementOnDriver("insert into " + tblName + " values(3,2),(3,3),(3,4),(4,2),(4,3),(4,4)", driver);
- executeStatementOnDriver("select * from " + tblName + " where b = 2", driver);
- }
-
@Test
public void testRebalanceCompactionOfNotPartitionedImplicitlyBucketedTable() throws Exception {
conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true);
@@ -354,6 +337,125 @@ public class TestCrudCompactorOnTez extends CompactorOnTezTest {
"Cannot execute rebalancing compaction on bucketed tables.", compacts.get(0).getErrorMessage());
}
+ @Test
+ public void testRebalanceCompactionNotPartitionedExplicitBucketNumbers() throws Exception {
+ conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true);
+ conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_GATHER_STATS, false);
+ conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false);
+
+ //set grouping size to have 3 buckets, and re-create driver with the new config
+ conf.set("tez.grouping.min-size", "1000");
+ conf.set("tez.grouping.max-size", "80000");
+ driver = new Driver(conf);
+
+ final String stageTableName = "stage_rebalance_test";
+ final String tableName = "rebalance_test";
+ AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf);
+
+ TestDataProvider testDataProvider = new TestDataProvider();
+ testDataProvider.createFullAcidTable(stageTableName, true, false);
+ testDataProvider.insertTestDataPartitioned(stageTableName);
+
+ executeStatementOnDriver("drop table if exists " + tableName, driver);
+ executeStatementOnDriver("CREATE TABLE " + tableName + "(a string, b int) " +
+ "STORED AS ORC TBLPROPERTIES('transactional'='true')", driver);
+ executeStatementOnDriver("INSERT OVERWRITE TABLE " + tableName + " select a, b from " + stageTableName, driver);
+
+ //do some single inserts to have more data in the first bucket.
+ executeStatementOnDriver("INSERT INTO TABLE " + tableName + " values ('12',12)", driver);
+ executeStatementOnDriver("INSERT INTO TABLE " + tableName + " values ('13',13)", driver);
+ executeStatementOnDriver("INSERT INTO TABLE " + tableName + " values ('14',14)", driver);
+ executeStatementOnDriver("INSERT INTO TABLE " + tableName + " values ('15',15)", driver);
+ executeStatementOnDriver("INSERT INTO TABLE " + tableName + " values ('16',16)", driver);
+ executeStatementOnDriver("INSERT INTO TABLE " + tableName + " values ('17',17)", driver);
+
+ // Verify buckets and their content before rebalance
+ Table table = msClient.getTable("default", tableName);
+ FileSystem fs = FileSystem.get(conf);
+ Assert.assertEquals("Test setup does not match the expected: different buckets",
+ Arrays.asList("bucket_00000_0", "bucket_00001_0", "bucket_00002_0"),
+ CompactorTestUtil.getBucketFileNames(fs, table, null, "base_0000001"));
+ String[][] expectedBuckets = new String[][] {
+ {
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t5\t4",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t6\t2",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":2}\t6\t3",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":3}\t6\t4",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":4}\t5\t2",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":5}\t5\t3",
+ "{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t12\t12",
+ "{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t13\t13",
+ "{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t14\t14",
+ "{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t15\t15",
+ "{\"writeid\":6,\"bucketid\":536870912,\"rowid\":0}\t16\t16",
+ "{\"writeid\":7,\"bucketid\":536870912,\"rowid\":0}\t17\t17",
+ },
+ {
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t2\t4",
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t3\t3",
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":2}\t4\t4",
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":3}\t4\t3",
+ },
+ {
+ "{\"writeid\":1,\"bucketid\":537001984,\"rowid\":0}\t2\t3",
+ "{\"writeid\":1,\"bucketid\":537001984,\"rowid\":1}\t3\t4",
+ },
+ };
+ for(int i = 0; i < 3; i++) {
+ Assert.assertEquals("rebalanced bucket " + i, Arrays.asList(expectedBuckets[i]),
+ testDataProvider.getBucketData(tableName, BucketCodec.V1.encode(options.bucket(i)) + ""));
+ }
+
+ //Try to do a rebalancing compaction
+ executeStatementOnDriver("ALTER TABLE " + tableName + " COMPACT 'rebalance' CLUSTERED INTO 4 BUCKETS", driver);
+ runWorker(conf);
+
+ //Check if the compaction succeed
+ TxnStore txnHandler = TxnUtils.getTxnStore(conf);
+ ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+ List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+ Assert.assertEquals("Expecting 1 rows and found " + compacts.size(), 1, compacts.size());
+ Assert.assertEquals("Expecting compaction state 'ready for cleaning' and found:" + compacts.get(0).getState(),
+ "ready for cleaning", compacts.get(0).getState());
+
+ // Verify buckets and their content after rebalance
+ Assert.assertEquals("Buckets does not match after compaction",
+ Arrays.asList("bucket_00000", "bucket_00001", "bucket_00002", "bucket_00003"),
+ CompactorTestUtil.getBucketFileNames(fs, table, null, "base_0000007_v0000020"));
+ expectedBuckets = new String[][] {
+ {
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t5\t4",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t6\t2",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":2}\t6\t3",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":3}\t6\t4",
+ "{\"writeid\":1,\"bucketid\":536870912,\"rowid\":4}\t5\t2",
+ },
+ {
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":5}\t5\t3",
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":6}\t2\t4",
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":7}\t3\t3",
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":8}\t4\t4",
+ "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":9}\t4\t3",
+ },
+ {
+ "{\"writeid\":1,\"bucketid\":537001984,\"rowid\":10}\t2\t3",
+ "{\"writeid\":1,\"bucketid\":537001984,\"rowid\":11}\t3\t4",
+ "{\"writeid\":2,\"bucketid\":537001984,\"rowid\":12}\t12\t12",
+ "{\"writeid\":3,\"bucketid\":537001984,\"rowid\":13}\t13\t13",
+ "{\"writeid\":4,\"bucketid\":537001984,\"rowid\":14}\t14\t14",
+ },
+ {
+ "{\"writeid\":5,\"bucketid\":537067520,\"rowid\":15}\t15\t15",
+ "{\"writeid\":6,\"bucketid\":537067520,\"rowid\":16}\t16\t16",
+ "{\"writeid\":7,\"bucketid\":537067520,\"rowid\":17}\t17\t17",
+ },
+ };
+ for(int i = 0; i < expectedBuckets.length; i++) {
+ Assert.assertEquals("rebalanced bucket " + i, Arrays.asList(expectedBuckets[i]),
+ testDataProvider.getBucketData(tableName, BucketCodec.V1.encode(options.bucket(i)) + ""));
+ }
+ }
+
@Test
public void testCompactionShouldNotFailOnPartitionsWithBooleanField() throws Exception {
conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true);
diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
index 7e44c7bd4ba..9f006523aab 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
@@ -1097,7 +1097,8 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` (
`CQ_INITIATOR_VERSION` string,
`CQ_WORKER_VERSION` string,
`CQ_CLEANER_START` bigint,
- `CQ_POOL_NAME` string
+ `CQ_POOL_NAME` string,
+ `CQ_NUMBER_OF_BUCKETS` string
)
STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
TBLPROPERTIES (
@@ -1125,7 +1126,8 @@ TBLPROPERTIES (
\"COMPACTION_QUEUE\".\"CQ_INITIATOR_VERSION\",
\"COMPACTION_QUEUE\".\"CQ_WORKER_VERSION\",
\"COMPACTION_QUEUE\".\"CQ_CLEANER_START\",
- \"COMPACTION_QUEUE\".\"CQ_POOL_NAME\"
+ \"COMPACTION_QUEUE\".\"CQ_POOL_NAME\",
+ \"COMPACTION_QUEUE\".\"CQ_NUMBER_OF_BUCKETS\"
FROM \"COMPACTION_QUEUE\"
"
);
@@ -1152,7 +1154,8 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` (
`CC_INITIATOR_ID` string,
`CC_INITIATOR_VERSION` string,
`CC_WORKER_VERSION` string,
- `CC_POOL_NAME` string
+ `CC_POOL_NAME` string,
+ `CC_NUMBER_OF_BUCKETS` string
)
STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
TBLPROPERTIES (
@@ -1180,7 +1183,8 @@ TBLPROPERTIES (
\"COMPLETED_COMPACTIONS\".\"CC_INITIATOR_ID\",
\"COMPLETED_COMPACTIONS\".\"CC_INITIATOR_VERSION\",
\"COMPLETED_COMPACTIONS\".\"CC_WORKER_VERSION\",
- \"COMPLETED_COMPACTIONS\".\"CC_POOL_NAME\"
+ \"COMPLETED_COMPACTIONS\".\"CC_POOL_NAME\",
+ \"COMPLETED_COMPACTIONS\".\"CC_NUMBER_OF_BUCKETS\"
FROM \"COMPLETED_COMPACTIONS\"
"
);
@@ -1212,6 +1216,7 @@ CREATE OR REPLACE VIEW `COMPACTIONS`
`C_INITIATOR_VERSION`,
`C_CLEANER_START`,
`C_POOL_NAME`,
+ `C_NUMBER_OF_BUCKETS`,
`C_TBLPROPERTIES`
) AS
SELECT
@@ -1241,6 +1246,7 @@ SELECT
CC_INITIATOR_VERSION,
NULL,
NVL(CC_POOL_NAME, 'default'),
+ CC_NUMBER_OF_BUCKETS,
CC_TBLPROPERTIES
FROM COMPLETED_COMPACTIONS
UNION ALL
@@ -1270,6 +1276,7 @@ SELECT
CQ_INITIATOR_VERSION,
FROM_UNIXTIME(CQ_CLEANER_START DIV 1000),
NVL(CQ_POOL_NAME, 'default'),
+ CQ_NUMBER_OF_BUCKETS,
CQ_TBLPROPERTIES
FROM COMPACTION_QUEUE;
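The schema change above also surfaces the new column through the sysdb COMPACTIONS view. A hypothetical query against it, using only columns visible in this diff:

    // Sketch: inspect queued/completed compactions and their requested bucket counts.
    executeStatementOnDriver(
        "SELECT c_pool_name, c_number_of_buckets FROM sys.compactions", driver);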
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
index 22cd1471055..b29e0443b93 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
@@ -440,8 +440,8 @@ compactPool
alterStatementSuffixCompact
@init { gParent.msgs.push("compaction request"); }
@after { gParent.msgs.pop(); }
- : KW_COMPACT compactType=StringLiteral blocking? compactPool? (KW_WITH KW_OVERWRITE KW_TBLPROPERTIES tableProperties)?
- -> ^(TOK_ALTERTABLE_COMPACT $compactType blocking? compactPool? tableProperties?)
+ : KW_COMPACT compactType=StringLiteral tableImplBuckets? blocking? compactPool? (KW_WITH KW_OVERWRITE KW_TBLPROPERTIES tableProperties)?
+ -> ^(TOK_ALTERTABLE_COMPACT $compactType tableImplBuckets? blocking? compactPool? tableProperties?)
;
alterStatementSuffixSetOwner
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 3841a8f385c..93755f7caf4 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -1917,6 +1917,14 @@ tableBuckets
-> ^(TOK_ALTERTABLE_BUCKETS $bucketCols $sortCols? $num)
;
+tableImplBuckets
+@init { pushMsg("implicit table buckets specification", state); }
+@after { popMsg(state); }
+ :
+ KW_CLUSTERED KW_INTO num=Number KW_BUCKETS
+ -> ^(TOK_ALTERTABLE_BUCKETS $num)
+ ;
+
tableSkewed
@init { pushMsg("table skewed specification", state); }
@after { popMsg(state); }
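Taken together with alterStatementSuffixCompact above, the optional clauses must appear in the order: compaction type, bucket clause, blocking clause, pool, then TBLPROPERTIES. A sketch of a combined statement the grammar should now accept, assuming the pre-existing AND WAIT (blocking) and POOL (compactPool) clause syntax; table and pool names are illustrative:

    // Clause order per alterStatementSuffixCompact:
    //   COMPACT <type> [CLUSTERED INTO n BUCKETS] [AND WAIT] [POOL 'name']
    //   [WITH OVERWRITE TBLPROPERTIES (...)]
    executeStatementOnDriver(
        "ALTER TABLE rebalance_test COMPACT 'rebalance' CLUSTERED INTO 4 BUCKETS AND WAIT POOL 'pool0'",
        driver);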
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactAnalyzer.java
index df95c80a53b..f0662f28713 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactAnalyzer.java
@@ -46,6 +46,7 @@ public class AlterTableCompactAnalyzer extends AbstractAlterTableAnalyzer {
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
throws SemanticException {
String type = unescapeSQLString(command.getChild(0).getText()).toLowerCase();
+ int numberOfBuckets = 0;
try {
CompactionType.valueOf(type.toUpperCase());
} catch (IllegalArgumentException e) {
@@ -67,12 +68,20 @@ public class AlterTableCompactAnalyzer extends AbstractAlterTableAnalyzer {
case HiveParser.TOK_COMPACT_POOL:
poolName = unescapeSQLString(node.getChild(0).getText());
break;
+ case HiveParser.TOK_ALTERTABLE_BUCKETS:
+ try {
+ numberOfBuckets = Integer.parseInt(node.getChild(0).getText());
+ } catch (NumberFormatException nfe) {
+ throw new SemanticException("Could not parse bucket number: " + node.getChild(0).getText());
+ }
+ break;
default:
break;
}
}
- AlterTableCompactDesc desc = new AlterTableCompactDesc(tableName, partitionSpec, type, isBlocking, poolName, mapProp);
+ AlterTableCompactDesc desc = new AlterTableCompactDesc(tableName, partitionSpec, type, isBlocking, poolName,
+ numberOfBuckets, mapProp);
addInputsOutputsAlterTable(tableName, partitionSpec, desc, desc.getType(), false);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactDesc.java
index 98088ea5f4c..48876498756 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactDesc.java
@@ -39,17 +39,20 @@ public class AlterTableCompactDesc extends AbstractAlterTableDesc implements DDL
private final String compactionType;
private final boolean isBlocking;
private final String poolName;
+ private final int numberOfBuckets;
private final Map<String, String> properties;
private Long writeId;
public AlterTableCompactDesc(TableName tableName, Map<String, String> partitionSpec, String compactionType,
- boolean isBlocking, String poolName, Map<String, String> properties) throws SemanticException{
+ boolean isBlocking, String poolName, int numberOfBuckets, Map<String, String> properties)
+ throws SemanticException{
super(AlterTableType.COMPACT, tableName, partitionSpec, null, false, false, properties);
this.tableName = tableName.getNotEmptyDbTable();
this.partitionSpec = partitionSpec;
this.compactionType = compactionType;
this.isBlocking = isBlocking;
this.poolName = poolName;
+ this.numberOfBuckets = numberOfBuckets;
this.properties = properties;
}
@@ -79,6 +82,11 @@ public class AlterTableCompactDesc extends AbstractAlterTableDesc implements DDL
return poolName;
}
+ @Explain(displayName = "numberOfBuckets", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public int getNumberOfBuckets() {
+ return numberOfBuckets;
+ }
+
@Explain(displayName = "properties", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public Map<String, String> getProperties() {
return properties;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java
index 784c6724085..e1bcd9a92c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java
@@ -103,6 +103,9 @@ public class AlterTableCompactOperation extends DDLOperation<AlterTableCompactDe
req.setProperties(desc.getProperties());
req.setInitiatorId(JavaUtils.hostname() + "-" + HiveMetaStoreClient.MANUALLY_INITIATED_COMPACTION);
req.setInitiatorVersion(HiveMetaStoreClient.class.getPackage().getImplementationVersion());
+ if (desc.getNumberOfBuckets() > 0) {
+ req.setNumberOfBuckets(desc.getNumberOfBuckets());
+ }
CompactionResponse resp = context.getDb().compact(req);
if (resp.isAccepted()) {
context.getConsole().printInfo("Compaction enqueued with id " + resp.getId());
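Because of the desc.getNumberOfBuckets() > 0 guard above, the thrift field stays unset unless a count was actually requested, so downstream code can distinguish "not specified" from a real value. A sketch of the equivalent programmatic path against the metastore client (the CompactionRequest constructor and the compact2 call are assumed from the existing IMetaStoreClient API):

    // Sketch only: enqueue a rebalance compaction by hand instead of via DDL.
    CompactionRequest req = new CompactionRequest("default", "rebalance_test", CompactionType.REBALANCE);
    req.setNumberOfBuckets(4); // new optional i32 field; leaving it unset means "derive from files"
    CompactionResponse resp = msc.compact2(req);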
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/concatenate/AlterTableConcatenateAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/concatenate/AlterTableConcatenateAnalyzer.java
index f9743506b6e..c81bd2b72ef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/concatenate/AlterTableConcatenateAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/concatenate/AlterTableConcatenateAnalyzer.java
@@ -104,7 +104,7 @@ public class AlterTableConcatenateAnalyzer extends AbstractAlterTableAnalyzer {
boolean isBlocking = !HiveConf.getBoolVar(conf, ConfVars.TRANSACTIONAL_CONCATENATE_NOBLOCK, false);
AlterTableCompactDesc desc = new AlterTableCompactDesc(tableName, partitionSpec, CompactionType.MAJOR.name(), isBlocking,
- poolName, null);
+ poolName, 0, null);
addInputsOutputsAlterTable(tableName, partitionSpec, desc, desc.getType(), false);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
setAcidDdlDesc(getTable(tableName), desc);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index c910d438c68..c205f2c974f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -32,7 +32,6 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.io.UncheckedIOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
@@ -63,8 +62,6 @@ import java.util.Optional;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
-import java.util.Spliterator;
-import java.util.Spliterators;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
@@ -77,8 +74,6 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import java.util.stream.StreamSupport;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java
index 8ae4907d95d..765272d97f7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java
@@ -24,6 +24,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -146,7 +147,9 @@ public class MRCompactor implements Compactor {
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(NullWritable.class);
job.setJarByClass(MRCompactor.class);
- LOG.debug("User jar set to " + job.getJar());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("User jar set to {}", job.getJar());
+ }
job.setMapperClass(CompactorMap.class);
job.setNumReduceTasks(0);
job.setInputFormat(CompactorInputFormat.class);
@@ -249,10 +252,12 @@ public class MRCompactor implements Compactor {
* Thus, force N minor compactions first to reduce number of deltas and then follow up with
* the compaction actually requested in {@link ci} which now needs to compact a lot fewer deltas
*/
- LOG.warn(parsedDeltas.size() + " delta files found for " + ci.getFullPartitionName()
- + " located at " + sd.getLocation() + "! This is likely a sign of misconfiguration, " +
- "especially if this message repeats. Check that compaction is running properly. Check for any " +
- "runaway/mis-configured process writing to ACID tables, especially using Streaming Ingest API.");
+ if (LOG.isWarnEnabled()) {
+ LOG.warn("{} delta files found for {} located at {}! This is likely a sign of misconfiguration, " +
+ "especially if this message repeats. Check that compaction is running properly. Check for any " +
+ "runaway/mis-configured process writing to ACID tables, especially using Streaming Ingest API.",
+ parsedDeltas.size(), ci.getFullPartitionName(), sd.getLocation());
+ }
int numMinorCompactions = parsedDeltas.size() / maxDeltasToHandle;
parsedDeltas.sort(AcidUtils.ParsedDeltaLight::compareTo);
@@ -286,20 +291,24 @@ public class MRCompactor implements Compactor {
baseDir = dir.getBaseDirectory();
if (baseDir == null) {
List<HdfsFileStatusWithId> originalFiles = dir.getOriginalFiles();
- if (!(originalFiles == null) && !(originalFiles.size() == 0)) {
+ if (originalFiles != null && !originalFiles.isEmpty()) {
// There are original format files
for (HdfsFileStatusWithId stat : originalFiles) {
Path path = stat.getFileStatus().getPath();
//note that originalFiles are all original files recursively not dirs
dirsToSearch.add(path);
- LOG.debug("Adding original file " + path + " to dirs to search");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding original file {} to dirs to search", path);
+ }
}
// Set base to the location so that the input format reads the original files.
baseDir = new Path(sd.getLocation());
}
} else {
// add our base to the list of directories to search for files in.
- LOG.debug("Adding base directory " + baseDir + " to dirs to search");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding base directory {} to dirs to search", baseDir);
+ }
dirsToSearch.add(baseDir);
}
}
@@ -339,7 +348,9 @@ public class MRCompactor implements Compactor {
long minTxn = Long.MAX_VALUE;
long maxTxn = Long.MIN_VALUE;
for (AcidUtils.ParsedDelta delta : parsedDeltas) {
- LOG.debug("Adding delta " + delta.getPath() + " to directories to search");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding delta {} to directories to search", delta.getPath());
+ }
dirsToSearch.add(delta.getPath());
deltaDirs.add(delta.getPath());
minTxn = Math.min(minTxn, delta.getMinWriteId());
@@ -367,17 +378,16 @@ public class MRCompactor implements Compactor {
if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)) {
mrJob = job;
}
-
- LOG.info("Submitting " + compactionType + " compaction job '" +
- job.getJobName() + "' to " + job.getQueueName() + " queue. " +
- "(current delta dirs count=" + curDirNumber +
- ", obsolete delta dirs count=" + obsoleteDirNumber + ". TxnIdRange[" + minTxn + "," + maxTxn + "]");
- JobClient jc = null;
- try {
- jc = new JobClient(job);
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Submitting {} compaction job '{}' to {} queue. (current delta dirs count={}, obsolete delta dirs " +
+ "count={}. TxnIdRange[{},{}}]",
+ compactionType, job.getJobName(), job.getQueueName(), curDirNumber, obsoleteDirNumber, minTxn, maxTxn);
+ }
+ try (JobClient jc = new JobClient(job)) {
RunningJob rj = jc.submitJob(job);
- LOG.info("Submitted compaction job '" + job.getJobName() +
- "' with jobID=" + rj.getID() + " compaction ID=" + id);
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Submitted compaction job '{}' with jobID={} compaction ID={}", job.getJobName(), rj.getID(), id);
+ }
try {
msc.setHadoopJobid(rj.getID().toString(), id);
} catch (TException e) {
@@ -387,11 +397,7 @@ public class MRCompactor implements Compactor {
rj.waitForCompletion();
if (!rj.isSuccessful()) {
throw new IOException((compactionType == CompactionType.MAJOR ? "Major" : "Minor") +
- " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID());
- }
- } finally {
- if (jc!=null) {
- jc.close();
+ " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID());
}
}
}
@@ -452,19 +458,15 @@ public class MRCompactor implements Compactor {
bucketNum = bucket;
this.base = base;
this.deltas = deltas;
- locations = new ArrayList<String>();
+ locations = new ArrayList<>();
this.deltasToAttemptId = deltasToAttemptId;
for (Path path : files) {
FileSystem fs = path.getFileSystem(hadoopConf);
FileStatus stat = fs.getFileStatus(path);
length += stat.getLen();
- BlockLocation[] locs = fs.getFileBlockLocations(stat, 0, length);
- for (int i = 0; i < locs.length; i++) {
- String[] hosts = locs[i].getHosts();
- for (int j = 0; j < hosts.length; j++) {
- locations.add(hosts[j]);
- }
+ for (BlockLocation loc : fs.getFileBlockLocations(stat, 0, length)) {
+ Collections.addAll(locations, loc.getHosts());
}
}
}
@@ -959,9 +961,9 @@ public class MRCompactor implements Compactor {
}
private static <T> T instantiate(Class<T> classType, String classname) throws IOException {
- T t = null;
+ T t;
try {
- Class c = JavaUtils.loadClass(classname);
+ Class<?> c = JavaUtils.loadClass(classname);
Object o = c.newInstance();
if (classType.isAssignableFrom(o.getClass())) {
t = (T)o;
@@ -1010,8 +1012,9 @@ public class MRCompactor implements Compactor {
Path tmpLocation = new Path(conf.get(TMP_LOCATION));//this contains base_xxx or delta_xxx_yyy
Path finalLocation = new Path(conf.get(FINAL_LOCATION));
FileSystem fs = tmpLocation.getFileSystem(conf);
- LOG.debug("Moving contents of " + tmpLocation.toString() + " to " +
- finalLocation.toString());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Moving contents of {} to {}", tmpLocation, finalLocation);
+ }
if(!fs.exists(tmpLocation)) {
/*
* No 'tmpLocation' may happen if job generated created 0 splits, which happens if all
@@ -1028,8 +1031,9 @@ public class MRCompactor implements Compactor {
.statementId(-1)
.visibilityTxnId(Compactor.getCompactorTxnId(conf));
Path newDeltaDir = AcidUtils.createFilename(finalLocation, options).getParent();
- LOG.info(context.getJobID() + ": " + tmpLocation +
- " not found. Assuming 0 splits. Creating " + newDeltaDir);
+ if (LOG.isInfoEnabled()) {
+ LOG.info("{}: {} not found. Assuming 0 splits. Creating {}", context.getJobID(), tmpLocation, newDeltaDir);
+ }
fs.mkdirs(newDeltaDir);
if (options.isWriteVersionFile()) {
AcidUtils.OrcAcidVersion.writeVersionFile(newDeltaDir, fs);
@@ -1059,7 +1063,9 @@ public class MRCompactor implements Compactor {
JobConf conf = ShimLoader.getHadoopShims().getJobConf(context);
Path tmpLocation = new Path(conf.get(TMP_LOCATION));
FileSystem fs = tmpLocation.getFileSystem(conf);
- LOG.debug("Removing " + tmpLocation.toString());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Removing {}", tmpLocation);
+ }
fs.delete(tmpLocation, true);
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RebalanceQueryCompactor.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RebalanceQueryCompactor.java
index 5b6ff8d7f8c..5e80e52f63b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RebalanceQueryCompactor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RebalanceQueryCompactor.java
@@ -50,11 +50,14 @@ final class RebalanceQueryCompactor extends QueryCompactor {
String tmpTableName = getTempTableName(table);
Path tmpTablePath = QueryCompactor.Util.getCompactionResultDir(storageDescriptor, writeIds,
conf, true, false, false, null);
+ int numBuckets = compactionInfo.numberOfBuckets;
+ if (numBuckets <= 0) {
+ //TODO: This is quite expensive, a better way should be found to get the number of buckets for an implicitly bucketed table
+ numBuckets = Streams.stream(new FileUtils.AdaptingIterator<>(FileUtils.listFiles(dir.getFs(), dir.getPath(), true, AcidUtils.bucketFileFilter)))
+ .map(f -> AcidUtils.parseBucketId(f.getPath()))
+ .collect(Collectors.toSet()).size();
+ }
- //TODO: This is quite expensive, a better way should be found to get the number of buckets for an implicitly bucketed table
- int numBuckets = Streams.stream(new FileUtils.AdaptingIterator<>(FileUtils.listFiles(dir.getFs(), dir.getPath(), true, AcidUtils.bucketFileFilter)))
- .map(f -> AcidUtils.parseBucketId(f.getPath()))
- .collect(Collectors.toSet()).size();
List<String> createQueries = getCreateQueries(tmpTableName, table, tmpTablePath.toString());
List<String> compactionQueries = getCompactionQueries(table, partition, tmpTableName, numBuckets);
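The effective bucket count is therefore the manually requested value when positive, and only otherwise the expensive file listing that counts distinct bucket ids among the existing bucket files (the TODO above). The decision reduces to a sketch like this (the helper name is illustrative):

    // Illustrative reduction of the logic above: a requested count > 0 wins,
    // otherwise fall back to the number of distinct bucket ids found on disk.
    static int effectiveBucketCount(int requestedBuckets, java.util.Set<Integer> bucketIdsOnDisk) {
        return requestedBuckets > 0 ? requestedBuckets : bucketIdsOnDisk.size();
    }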
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index f88f0772d53..adadc415d4d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -313,6 +313,13 @@ public class Worker extends RemoteCompactorThread implements MetaStoreThread {
return false;
}
+ if (!ci.type.equals(CompactionType.REBALANCE) && ci.numberOfBuckets > 0) {
+ if (LOG.isWarnEnabled()) {
+ LOG.warn("Only the REBALANCE compaction accepts the number of buckets clause (CLUSTERED INTO {N} BUCKETS). " +
+ "Since the compaction request is {}, it will be ignored.", ci.type);
+ }
+ }
+
checkInterrupt();
String fullTableName = TxnUtils.getFullTableName(table.getDbName(), table.getTableName());
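Note that the Worker only warns here and then proceeds: for MAJOR or MINOR requests the bucket clause is accepted by the parser but has no effect beyond the log line. For example (hypothetical statement):

    // Parses and runs, but the CLUSTERED INTO clause is ignored with a warning:
    executeStatementOnDriver(
        "ALTER TABLE rebalance_test COMPACT 'major' CLUSTERED INTO 4 BUCKETS", driver);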
diff --git a/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out b/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
index 6f0559197eb..9caa03e075b 100644
--- a/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
+++ b/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
@@ -46,6 +46,7 @@ STAGE PLANS:
Compact
compaction type: major
table name: default.T1_n153
+ numberOfBuckets: 0
pool: test
table name: default.T1_n153
diff --git a/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact2.q.out b/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact2.q.out
index 92e4762a60b..e0b6d31c688 100644
--- a/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact2.q.out
+++ b/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact2.q.out
@@ -44,6 +44,7 @@ STAGE PLANS:
Compact
compaction type: minor
table name: default.T1_n105
+ numberOfBuckets: 0
partition spec:
ds yesterday
table name: default.T1_n105
diff --git a/ql/src/test/results/clientpositive/llap/sysdb.q.out b/ql/src/test/results/clientpositive/llap/sysdb.q.out
index 0450496678d..fb98a090b99 100644
--- a/ql/src/test/results/clientpositive/llap/sysdb.q.out
+++ b/ql/src/test/results/clientpositive/llap/sysdb.q.out
@@ -488,6 +488,7 @@ compaction_queue cq_id
compaction_queue cq_initiator_id
compaction_queue cq_initiator_version
compaction_queue cq_next_txn_id
+compaction_queue cq_number_of_buckets
compaction_queue cq_partition
compaction_queue cq_pool_name
compaction_queue cq_run_as
@@ -526,6 +527,7 @@ compactions c_initiator_version
compactions c_initiator_version
compactions c_next_txn_id
compactions c_next_txn_id
+compactions c_number_of_buckets
compactions c_partition
compactions c_partition
compactions c_pool_name
@@ -561,6 +563,7 @@ completed_compactions cc_id
completed_compactions cc_initiator_id
completed_compactions cc_initiator_version
completed_compactions cc_next_txn_id
+completed_compactions cc_number_of_buckets
completed_compactions cc_partition
completed_compactions cc_pool_name
completed_compactions cc_run_as
@@ -1590,8 +1593,8 @@ POSTHOOK: Input: sys@compaction_queue
POSTHOOK: Input: sys@compactions
POSTHOOK: Input: sys@completed_compactions
#### A masked pattern was here ####
-1 default default scr_txn NULL major initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.0.0-SNAPSHOT NULL default NULL
-2 default default scr_txn_2 NULL minor initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.0.0-SNAPSHOT NULL default NULL
+1 default default scr_txn NULL major initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.0.0-SNAPSHOT NULL default NULL NULL
+2 default default scr_txn_2 NULL minor initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.0.0-SNAPSHOT NULL default NULL NULL
PREHOOK: query: use INFORMATION_SCHEMA
PREHOOK: type: SWITCHDATABASE
PREHOOK: Input: database:information_schema
diff --git a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
index a054e08c713..57a21e9a869 100644
--- a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
+++ b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
@@ -832,6 +832,7 @@ STAGE PLANS:
Compact
compaction type: major
table name: default.over10k_orc_bucketed_n0
+ numberOfBuckets: 0
properties:
compactor.hive.tez.container.size 500
compactor.mapreduce.map.memory.mb 500
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 7a3d4abeebc..77fc3ec076d 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -26771,6 +26771,11 @@ void CompactionRequest::__set_poolName(const std::string& val) {
this->poolName = val;
__isset.poolName = true;
}
+
+void CompactionRequest::__set_numberOfBuckets(const int32_t val) {
+ this->numberOfBuckets = val;
+__isset.numberOfBuckets = true;
+}
std::ostream& operator<<(std::ostream& out, const CompactionRequest& obj)
{
obj.printTo(out);
@@ -26891,6 +26896,14 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 10:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->numberOfBuckets);
+ this->__isset.numberOfBuckets = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -26965,6 +26978,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeString(this->poolName);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.numberOfBuckets) {
+ xfer += oprot->writeFieldBegin("numberOfBuckets", ::apache::thrift::protocol::T_I32, 10);
+ xfer += oprot->writeI32(this->numberOfBuckets);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -26981,6 +26999,7 @@ void swap(CompactionRequest &a, CompactionRequest &b) {
swap(a.initiatorId, b.initiatorId);
swap(a.initiatorVersion, b.initiatorVersion);
swap(a.poolName, b.poolName);
+ swap(a.numberOfBuckets, b.numberOfBuckets);
swap(a.__isset, b.__isset);
}
@@ -26994,6 +27013,7 @@ CompactionRequest::CompactionRequest(const CompactionRequest& other978) {
initiatorId = other978.initiatorId;
initiatorVersion = other978.initiatorVersion;
poolName = other978.poolName;
+ numberOfBuckets = other978.numberOfBuckets;
__isset = other978.__isset;
}
CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other979) {
@@ -27006,6 +27026,7 @@ CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other97
initiatorId = other979.initiatorId;
initiatorVersion = other979.initiatorVersion;
poolName = other979.poolName;
+ numberOfBuckets = other979.numberOfBuckets;
__isset = other979.__isset;
return *this;
}
@@ -27021,6 +27042,7 @@ void CompactionRequest::printTo(std::ostream& out) const {
out << ", " << "initiatorId="; (__isset.initiatorId ? (out << to_string(initiatorId)) : (out << "<null>"));
out << ", " << "initiatorVersion="; (__isset.initiatorVersion ? (out << to_string(initiatorVersion)) : (out << "<null>"));
out << ", " << "poolName="; (__isset.poolName ? (out << to_string(poolName)) : (out << "<null>"));
+ out << ", " << "numberOfBuckets="; (__isset.numberOfBuckets ? (out << to_string(numberOfBuckets)) : (out << "<null>"));
out << ")";
}
@@ -27109,6 +27131,11 @@ void CompactionInfoStruct::__set_poolname(const std::string& val) {
this->poolname = val;
__isset.poolname = true;
}
+
+void CompactionInfoStruct::__set_numberOfBuckets(const int32_t val) {
+ this->numberOfBuckets = val;
+__isset.numberOfBuckets = true;
+}
std::ostream& operator<<(std::ostream& out, const CompactionInfoStruct& obj)
{
obj.printTo(out);
@@ -27279,6 +27306,14 @@ uint32_t CompactionInfoStruct::read(::apache::thrift::protocol::TProtocol* iprot
xfer += iprot->skip(ftype);
}
break;
+ case 18:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->numberOfBuckets);
+ this->__isset.numberOfBuckets = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -27385,6 +27420,11 @@ uint32_t CompactionInfoStruct::write(::apache::thrift::protocol::TProtocol* opro
xfer += oprot->writeString(this->poolname);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.numberOfBuckets) {
+ xfer += oprot->writeFieldBegin("numberOfBuckets", ::apache::thrift::protocol::T_I32, 18);
+ xfer += oprot->writeI32(this->numberOfBuckets);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -27409,6 +27449,7 @@ void swap(CompactionInfoStruct &a, CompactionInfoStruct &b) {
swap(a.enqueueTime, b.enqueueTime);
swap(a.retryRetention, b.retryRetention);
swap(a.poolname, b.poolname);
+ swap(a.numberOfBuckets, b.numberOfBuckets);
swap(a.__isset, b.__isset);
}
@@ -27430,6 +27471,7 @@ CompactionInfoStruct::CompactionInfoStruct(const CompactionInfoStruct& other981)
enqueueTime = other981.enqueueTime;
retryRetention = other981.retryRetention;
poolname = other981.poolname;
+ numberOfBuckets = other981.numberOfBuckets;
__isset = other981.__isset;
}
CompactionInfoStruct& CompactionInfoStruct::operator=(const CompactionInfoStruct& other982) {
@@ -27450,6 +27492,7 @@ CompactionInfoStruct& CompactionInfoStruct::operator=(const CompactionInfoStruct
enqueueTime = other982.enqueueTime;
retryRetention = other982.retryRetention;
poolname = other982.poolname;
+ numberOfBuckets = other982.numberOfBuckets;
__isset = other982.__isset;
return *this;
}
@@ -27473,6 +27516,7 @@ void CompactionInfoStruct::printTo(std::ostream& out) const {
out << ", " << "enqueueTime="; (__isset.enqueueTime ? (out << to_string(enqueueTime)) : (out << "<null>"));
out << ", " << "retryRetention="; (__isset.retryRetention ? (out << to_string(retryRetention)) : (out << "<null>"));
out << ", " << "poolname="; (__isset.poolname ? (out << to_string(poolname)) : (out << "<null>"));
+ out << ", " << "numberOfBuckets="; (__isset.numberOfBuckets ? (out << to_string(numberOfBuckets)) : (out << "<null>"));
out << ")";
}
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 3c5ebe4a4d4..02df01b1042 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -10606,13 +10606,14 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b);
std::ostream& operator<<(std::ostream& out, const HeartbeatTxnRangeResponse& obj);
typedef struct _CompactionRequest__isset {
- _CompactionRequest__isset() : partitionname(false), runas(false), properties(false), initiatorId(false), initiatorVersion(false), poolName(false) {}
+ _CompactionRequest__isset() : partitionname(false), runas(false), properties(false), initiatorId(false), initiatorVersion(false), poolName(false), numberOfBuckets(false) {}
bool partitionname :1;
bool runas :1;
bool properties :1;
bool initiatorId :1;
bool initiatorVersion :1;
bool poolName :1;
+ bool numberOfBuckets :1;
} _CompactionRequest__isset;
class CompactionRequest : public virtual ::apache::thrift::TBase {
@@ -10628,7 +10629,8 @@ class CompactionRequest : public virtual ::apache::thrift::TBase {
runas(),
initiatorId(),
initiatorVersion(),
- poolName() {
+ poolName(),
+ numberOfBuckets(0) {
}
virtual ~CompactionRequest() noexcept;
@@ -10645,6 +10647,7 @@ class CompactionRequest : public virtual ::apache::thrift::TBase {
std::string initiatorId;
std::string initiatorVersion;
std::string poolName;
+ int32_t numberOfBuckets;
_CompactionRequest__isset __isset;
@@ -10666,6 +10669,8 @@ class CompactionRequest : public virtual ::apache::thrift::TBase {
void __set_poolName(const std::string& val);
+ void __set_numberOfBuckets(const int32_t val);
+
bool operator == (const CompactionRequest & rhs) const
{
if (!(dbname == rhs.dbname))
@@ -10698,6 +10703,10 @@ class CompactionRequest : public virtual ::apache::thrift::TBase {
return false;
else if (__isset.poolName && !(poolName == rhs.poolName))
return false;
+ if (__isset.numberOfBuckets != rhs.__isset.numberOfBuckets)
+ return false;
+ else if (__isset.numberOfBuckets && !(numberOfBuckets == rhs.numberOfBuckets))
+ return false;
return true;
}
bool operator != (const CompactionRequest &rhs) const {
@@ -10717,7 +10726,7 @@ void swap(CompactionRequest &a, CompactionRequest &b);
std::ostream& operator<<(std::ostream& out, const CompactionRequest& obj);
typedef struct _CompactionInfoStruct__isset {
- _CompactionInfoStruct__isset() : partitionname(false), runas(false), properties(false), toomanyaborts(false), state(false), workerId(false), start(false), highestWriteId(false), errorMessage(false), hasoldabort(false), enqueueTime(false), retryRetention(false), poolname(false) {}
+ _CompactionInfoStruct__isset() : partitionname(false), runas(false), properties(false), toomanyaborts(false), state(false), workerId(false), start(false), highestWriteId(false), errorMessage(false), hasoldabort(false), enqueueTime(false), retryRetention(false), poolname(false), numberOfBuckets(false) {}
bool partitionname :1;
bool runas :1;
bool properties :1;
@@ -10731,6 +10740,7 @@ typedef struct _CompactionInfoStruct__isset {
bool enqueueTime :1;
bool retryRetention :1;
bool poolname :1;
+ bool numberOfBuckets :1;
} _CompactionInfoStruct__isset;
class CompactionInfoStruct : public virtual ::apache::thrift::TBase {
@@ -10755,7 +10765,8 @@ class CompactionInfoStruct : public virtual ::apache::thrift::TBase {
hasoldabort(0),
enqueueTime(0),
retryRetention(0),
- poolname() {
+ poolname(),
+ numberOfBuckets(0) {
}
virtual ~CompactionInfoStruct() noexcept;
@@ -10780,6 +10791,7 @@ class CompactionInfoStruct : public virtual ::apache::thrift::TBase {
int64_t enqueueTime;
int64_t retryRetention;
std::string poolname;
+ int32_t numberOfBuckets;
_CompactionInfoStruct__isset __isset;
@@ -10817,6 +10829,8 @@ class CompactionInfoStruct : public virtual ::apache::thrift::TBase {
void __set_poolname(const std::string& val);
+ void __set_numberOfBuckets(const int32_t val);
+
bool operator == (const CompactionInfoStruct & rhs) const
{
if (!(id == rhs.id))
@@ -10879,6 +10893,10 @@ class CompactionInfoStruct : public virtual ::apache::thrift::TBase {
return false;
else if (__isset.poolname && !(poolname == rhs.poolname))
return false;
+ if (__isset.numberOfBuckets != rhs.__isset.numberOfBuckets)
+ return false;
+ else if (__isset.numberOfBuckets && !(numberOfBuckets == rhs.numberOfBuckets))
+ return false;
return true;
}
bool operator != (const CompactionInfoStruct &rhs) const {
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java
index 379a1a7c0de..04865221775 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java
@@ -28,6 +28,7 @@ package org.apache.hadoop.hive.metastore.api;
private static final org.apache.thrift.protocol.TField ENQUEUE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("enqueueTime", org.apache.thrift.protocol.TType.I64, (short)15);
private static final org.apache.thrift.protocol.TField RETRY_RETENTION_FIELD_DESC = new org.apache.thrift.protocol.TField("retryRetention", org.apache.thrift.protocol.TType.I64, (short)16);
private static final org.apache.thrift.protocol.TField POOLNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolname", org.apache.thrift.protocol.TType.STRING, (short)17);
+ private static final org.apache.thrift.protocol.TField NUMBER_OF_BUCKETS_FIELD_DESC = new org.apache.thrift.protocol.TField("numberOfBuckets", org.apache.thrift.protocol.TType.I32, (short)18);
private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new CompactionInfoStructStandardSchemeFactory();
private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new CompactionInfoStructTupleSchemeFactory();
@@ -49,6 +50,7 @@ package org.apache.hadoop.hive.metastore.api;
private long enqueueTime; // optional
private long retryRetention; // optional
private @org.apache.thrift.annotation.Nullable java.lang.String poolname; // optional
+ private int numberOfBuckets; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -72,7 +74,8 @@ package org.apache.hadoop.hive.metastore.api;
HASOLDABORT((short)14, "hasoldabort"),
ENQUEUE_TIME((short)15, "enqueueTime"),
RETRY_RETENTION((short)16, "retryRetention"),
- POOLNAME((short)17, "poolname");
+ POOLNAME((short)17, "poolname"),
+ NUMBER_OF_BUCKETS((short)18, "numberOfBuckets");
private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
@@ -122,6 +125,8 @@ package org.apache.hadoop.hive.metastore.api;
return RETRY_RETENTION;
case 17: // POOLNAME
return POOLNAME;
+ case 18: // NUMBER_OF_BUCKETS
+ return NUMBER_OF_BUCKETS;
default:
return null;
}
@@ -170,8 +175,9 @@ package org.apache.hadoop.hive.metastore.api;
private static final int __HASOLDABORT_ISSET_ID = 4;
private static final int __ENQUEUETIME_ISSET_ID = 5;
private static final int __RETRYRETENTION_ISSET_ID = 6;
+ private static final int __NUMBEROFBUCKETS_ISSET_ID = 7;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID,_Fields.ERROR_MESSAGE,_Fields.HASOLDABORT,_Fields.ENQUEUE_TIME,_Fields.RETRY_RETENTION,_Fields.POOLNAME};
+ private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID,_Fields.ERROR_MESSAGE,_Fields.HASOLDABORT,_Fields.ENQUEUE_TIME,_Fields.RETRY_RETENTION,_Fields.POOLNAME,_Fields.NUMBER_OF_BUCKETS};
public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -209,6 +215,8 @@ package org.apache.hadoop.hive.metastore.api;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.POOLNAME, new org.apache.thrift.meta_data.FieldMetaData("poolname", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.NUMBER_OF_BUCKETS, new org.apache.thrift.meta_data.FieldMetaData("numberOfBuckets", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionInfoStruct.class, metaDataMap);
}
@@ -272,6 +280,7 @@ package org.apache.hadoop.hive.metastore.api;
if (other.isSetPoolname()) {
this.poolname = other.poolname;
}
+ this.numberOfBuckets = other.numberOfBuckets;
}
public CompactionInfoStruct deepCopy() {
@@ -304,6 +313,8 @@ package org.apache.hadoop.hive.metastore.api;
setRetryRetentionIsSet(false);
this.retryRetention = 0;
this.poolname = null;
+ setNumberOfBucketsIsSet(false);
+ this.numberOfBuckets = 0;
}
public long getId() {
@@ -708,6 +719,28 @@ package org.apache.hadoop.hive.metastore.api;
}
}
+ public int getNumberOfBuckets() {
+ return this.numberOfBuckets;
+ }
+
+ public void setNumberOfBuckets(int numberOfBuckets) {
+ this.numberOfBuckets = numberOfBuckets;
+ setNumberOfBucketsIsSet(true);
+ }
+
+ public void unsetNumberOfBuckets() {
+ __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMBEROFBUCKETS_ISSET_ID);
+ }
+
+ /** Returns true if field numberOfBuckets is set (has been assigned a value) and false otherwise */
+ public boolean isSetNumberOfBuckets() {
+ return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __NUMBEROFBUCKETS_ISSET_ID);
+ }
+
+ public void setNumberOfBucketsIsSet(boolean value) {
+ __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMBEROFBUCKETS_ISSET_ID, value);
+ }
+
public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
switch (field) {
case ID:
@@ -846,6 +879,14 @@ package org.apache.hadoop.hive.metastore.api;
}
break;
+ case NUMBER_OF_BUCKETS:
+ if (value == null) {
+ unsetNumberOfBuckets();
+ } else {
+ setNumberOfBuckets((java.lang.Integer)value);
+ }
+ break;
+
}
}
@@ -903,6 +944,9 @@ package org.apache.hadoop.hive.metastore.api;
case POOLNAME:
return getPoolname();
+ case NUMBER_OF_BUCKETS:
+ return getNumberOfBuckets();
+
}
throw new java.lang.IllegalStateException();
}
@@ -948,6 +992,8 @@ package org.apache.hadoop.hive.metastore.api;
return isSetRetryRetention();
case POOLNAME:
return isSetPoolname();
+ case NUMBER_OF_BUCKETS:
+ return isSetNumberOfBuckets();
}
throw new java.lang.IllegalStateException();
}
@@ -1118,6 +1164,15 @@ package org.apache.hadoop.hive.metastore.api;
return false;
}
+ boolean this_present_numberOfBuckets = true && this.isSetNumberOfBuckets();
+ boolean that_present_numberOfBuckets = true && that.isSetNumberOfBuckets();
+ if (this_present_numberOfBuckets || that_present_numberOfBuckets) {
+ if (!(this_present_numberOfBuckets && that_present_numberOfBuckets))
+ return false;
+ if (this.numberOfBuckets != that.numberOfBuckets)
+ return false;
+ }
+
return true;
}
@@ -1191,6 +1246,10 @@ package org.apache.hadoop.hive.metastore.api;
if (isSetPoolname())
hashCode = hashCode * 8191 + poolname.hashCode();
+ hashCode = hashCode * 8191 + ((isSetNumberOfBuckets()) ? 131071 : 524287);
+ if (isSetNumberOfBuckets())
+ hashCode = hashCode * 8191 + numberOfBuckets;
+
return hashCode;
}
@@ -1372,6 +1431,16 @@ package org.apache.hadoop.hive.metastore.api;
return lastComparison;
}
}
+ lastComparison = java.lang.Boolean.compare(isSetNumberOfBuckets(), other.isSetNumberOfBuckets());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetNumberOfBuckets()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numberOfBuckets, other.numberOfBuckets);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -1526,6 +1595,12 @@ package org.apache.hadoop.hive.metastore.api;
}
first = false;
}
+ if (isSetNumberOfBuckets()) {
+ if (!first) sb.append(", ");
+ sb.append("numberOfBuckets:");
+ sb.append(this.numberOfBuckets);
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1723,6 +1798,14 @@ package org.apache.hadoop.hive.metastore.api;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 18: // NUMBER_OF_BUCKETS
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.numberOfBuckets = iprot.readI32();
+ struct.setNumberOfBucketsIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -1833,6 +1916,11 @@ package org.apache.hadoop.hive.metastore.api;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetNumberOfBuckets()) {
+ oprot.writeFieldBegin(NUMBER_OF_BUCKETS_FIELD_DESC);
+ oprot.writeI32(struct.numberOfBuckets);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -1894,7 +1982,10 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetPoolname()) {
optionals.set(12);
}
- oprot.writeBitSet(optionals, 13);
+ if (struct.isSetNumberOfBuckets()) {
+ optionals.set(13);
+ }
+ oprot.writeBitSet(optionals, 14);
if (struct.isSetPartitionname()) {
oprot.writeString(struct.partitionname);
}
@@ -1934,6 +2025,9 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetPoolname()) {
oprot.writeString(struct.poolname);
}
+ if (struct.isSetNumberOfBuckets()) {
+ oprot.writeI32(struct.numberOfBuckets);
+ }
}
@Override
@@ -1947,7 +2041,7 @@ package org.apache.hadoop.hive.metastore.api;
struct.setTablenameIsSet(true);
struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32());
struct.setTypeIsSet(true);
- java.util.BitSet incoming = iprot.readBitSet(13);
+ java.util.BitSet incoming = iprot.readBitSet(14);
if (incoming.get(0)) {
struct.partitionname = iprot.readString();
struct.setPartitionnameIsSet(true);
@@ -2000,6 +2094,10 @@ package org.apache.hadoop.hive.metastore.api;
struct.poolname = iprot.readString();
struct.setPoolnameIsSet(true);
}
+ if (incoming.get(13)) {
+ struct.numberOfBuckets = iprot.readI32();
+ struct.setNumberOfBucketsIsSet(true);
+ }
}
}
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
index 0520fe6ac7c..fee7d81d95c 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore.api;
private static final org.apache.thrift.protocol.TField INITIATOR_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("initiatorId", org.apache.thrift.protocol.TType.STRING, (short)7);
private static final org.apache.thrift.protocol.TField INITIATOR_VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("initiatorVersion", org.apache.thrift.protocol.TType.STRING, (short)8);
private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolName", org.apache.thrift.protocol.TType.STRING, (short)9);
+ private static final org.apache.thrift.protocol.TField NUMBER_OF_BUCKETS_FIELD_DESC = new org.apache.thrift.protocol.TField("numberOfBuckets", org.apache.thrift.protocol.TType.I32, (short)10);
private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new CompactionRequestStandardSchemeFactory();
private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new CompactionRequestTupleSchemeFactory();
@@ -33,6 +34,7 @@ package org.apache.hadoop.hive.metastore.api;
private @org.apache.thrift.annotation.Nullable java.lang.String initiatorId; // optional
private @org.apache.thrift.annotation.Nullable java.lang.String initiatorVersion; // optional
private @org.apache.thrift.annotation.Nullable java.lang.String poolName; // optional
+ private int numberOfBuckets; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -48,7 +50,8 @@ package org.apache.hadoop.hive.metastore.api;
PROPERTIES((short)6, "properties"),
INITIATOR_ID((short)7, "initiatorId"),
INITIATOR_VERSION((short)8, "initiatorVersion"),
- POOL_NAME((short)9, "poolName");
+ POOL_NAME((short)9, "poolName"),
+ NUMBER_OF_BUCKETS((short)10, "numberOfBuckets");
private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
@@ -82,6 +85,8 @@ package org.apache.hadoop.hive.metastore.api;
return INITIATOR_VERSION;
case 9: // POOL_NAME
return POOL_NAME;
+ case 10: // NUMBER_OF_BUCKETS
+ return NUMBER_OF_BUCKETS;
default:
return null;
}
@@ -123,7 +128,9 @@ package org.apache.hadoop.hive.metastore.api;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.INITIATOR_ID,_Fields.INITIATOR_VERSION,_Fields.POOL_NAME};
+ private static final int __NUMBEROFBUCKETS_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.INITIATOR_ID,_Fields.INITIATOR_VERSION,_Fields.POOL_NAME,_Fields.NUMBER_OF_BUCKETS};
public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -147,6 +154,8 @@ package org.apache.hadoop.hive.metastore.api;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.POOL_NAME, new org.apache.thrift.meta_data.FieldMetaData("poolName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.NUMBER_OF_BUCKETS, new org.apache.thrift.meta_data.FieldMetaData("numberOfBuckets", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionRequest.class, metaDataMap);
}
@@ -169,6 +178,7 @@ package org.apache.hadoop.hive.metastore.api;
* Performs a deep copy on <i>other</i>.
*/
public CompactionRequest(CompactionRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbname()) {
this.dbname = other.dbname;
}
@@ -197,6 +207,7 @@ package org.apache.hadoop.hive.metastore.api;
if (other.isSetPoolName()) {
this.poolName = other.poolName;
}
+ this.numberOfBuckets = other.numberOfBuckets;
}
public CompactionRequest deepCopy() {
@@ -214,6 +225,8 @@ package org.apache.hadoop.hive.metastore.api;
this.initiatorId = null;
this.initiatorVersion = null;
this.poolName = null;
+ setNumberOfBucketsIsSet(false);
+ this.numberOfBuckets = 0;
}
@org.apache.thrift.annotation.Nullable
@@ -451,6 +464,28 @@ package org.apache.hadoop.hive.metastore.api;
}
}
+ public int getNumberOfBuckets() {
+ return this.numberOfBuckets;
+ }
+
+ public void setNumberOfBuckets(int numberOfBuckets) {
+ this.numberOfBuckets = numberOfBuckets;
+ setNumberOfBucketsIsSet(true);
+ }
+
+ public void unsetNumberOfBuckets() {
+ __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMBEROFBUCKETS_ISSET_ID);
+ }
+
+ /** Returns true if field numberOfBuckets is set (has been assigned a value) and false otherwise */
+ public boolean isSetNumberOfBuckets() {
+ return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __NUMBEROFBUCKETS_ISSET_ID);
+ }
+
+ public void setNumberOfBucketsIsSet(boolean value) {
+ __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMBEROFBUCKETS_ISSET_ID, value);
+ }
+
public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
switch (field) {
case DBNAME:
@@ -525,6 +560,14 @@ package org.apache.hadoop.hive.metastore.api;
}
break;
+ case NUMBER_OF_BUCKETS:
+ if (value == null) {
+ unsetNumberOfBuckets();
+ } else {
+ setNumberOfBuckets((java.lang.Integer)value);
+ }
+ break;
+
}
}
@@ -558,6 +601,9 @@ package org.apache.hadoop.hive.metastore.api;
case POOL_NAME:
return getPoolName();
+ case NUMBER_OF_BUCKETS:
+ return getNumberOfBuckets();
+
}
throw new java.lang.IllegalStateException();
}
@@ -587,6 +633,8 @@ package org.apache.hadoop.hive.metastore.api;
return isSetInitiatorVersion();
case POOL_NAME:
return isSetPoolName();
+ case NUMBER_OF_BUCKETS:
+ return isSetNumberOfBuckets();
}
throw new java.lang.IllegalStateException();
}
@@ -685,6 +733,15 @@ package org.apache.hadoop.hive.metastore.api;
return false;
}
+ boolean this_present_numberOfBuckets = true && this.isSetNumberOfBuckets();
+ boolean that_present_numberOfBuckets = true && that.isSetNumberOfBuckets();
+ if (this_present_numberOfBuckets || that_present_numberOfBuckets) {
+ if (!(this_present_numberOfBuckets && that_present_numberOfBuckets))
+ return false;
+ if (this.numberOfBuckets != that.numberOfBuckets)
+ return false;
+ }
+
return true;
}
@@ -728,6 +785,10 @@ package org.apache.hadoop.hive.metastore.api;
if (isSetPoolName())
hashCode = hashCode * 8191 + poolName.hashCode();
+ hashCode = hashCode * 8191 + ((isSetNumberOfBuckets()) ? 131071 : 524287);
+ if (isSetNumberOfBuckets())
+ hashCode = hashCode * 8191 + numberOfBuckets;
+
return hashCode;
}
@@ -829,6 +890,16 @@ package org.apache.hadoop.hive.metastore.api;
return lastComparison;
}
}
+ lastComparison = java.lang.Boolean.compare(isSetNumberOfBuckets(), other.isSetNumberOfBuckets());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetNumberOfBuckets()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numberOfBuckets, other.numberOfBuckets);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -933,6 +1004,12 @@ package org.apache.hadoop.hive.metastore.api;
}
first = false;
}
+ if (isSetNumberOfBuckets()) {
+ if (!first) sb.append(", ");
+ sb.append("numberOfBuckets:");
+ sb.append(this.numberOfBuckets);
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -964,6 +1041,8 @@ package org.apache.hadoop.hive.metastore.api;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -1072,6 +1151,14 @@ package org.apache.hadoop.hive.metastore.api;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 10: // NUMBER_OF_BUCKETS
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.numberOfBuckets = iprot.readI32();
+ struct.setNumberOfBucketsIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -1150,6 +1237,11 @@ package org.apache.hadoop.hive.metastore.api;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetNumberOfBuckets()) {
+ oprot.writeFieldBegin(NUMBER_OF_BUCKETS_FIELD_DESC);
+ oprot.writeI32(struct.numberOfBuckets);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -1189,7 +1281,10 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetPoolName()) {
optionals.set(5);
}
- oprot.writeBitSet(optionals, 6);
+ if (struct.isSetNumberOfBuckets()) {
+ optionals.set(6);
+ }
+ oprot.writeBitSet(optionals, 7);
if (struct.isSetPartitionname()) {
oprot.writeString(struct.partitionname);
}
@@ -1215,6 +1310,9 @@ package org.apache.hadoop.hive.metastore.api;
if (struct.isSetPoolName()) {
oprot.writeString(struct.poolName);
}
+ if (struct.isSetNumberOfBuckets()) {
+ oprot.writeI32(struct.numberOfBuckets);
+ }
}
@Override
@@ -1226,7 +1324,7 @@ package org.apache.hadoop.hive.metastore.api;
struct.setTablenameIsSet(true);
struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32());
struct.setTypeIsSet(true);
- java.util.BitSet incoming = iprot.readBitSet(6);
+ java.util.BitSet incoming = iprot.readBitSet(7);
if (incoming.get(0)) {
struct.partitionname = iprot.readString();
struct.setPartitionnameIsSet(true);
@@ -1262,6 +1360,10 @@ package org.apache.hadoop.hive.metastore.api;
struct.poolName = iprot.readString();
struct.setPoolNameIsSet(true);
}
+ if (incoming.get(6)) {
+ struct.numberOfBuckets = iprot.readI32();
+ struct.setNumberOfBucketsIsSet(true);
+ }
}
}
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionInfoStruct.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionInfoStruct.php
index 95c2a8c93e0..efbf2543bb4 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionInfoStruct.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionInfoStruct.php
@@ -107,6 +107,11 @@ class CompactionInfoStruct
'isRequired' => false,
'type' => TType::STRING,
),
+ 18 => array(
+ 'var' => 'numberOfBuckets',
+ 'isRequired' => false,
+ 'type' => TType::I32,
+ ),
);
/**
@@ -177,6 +182,10 @@ class CompactionInfoStruct
* @var string
*/
public $poolname = null;
+ /**
+ * @var int
+ */
+ public $numberOfBuckets = null;
public function __construct($vals = null)
{
@@ -232,6 +241,9 @@ class CompactionInfoStruct
if (isset($vals['poolname'])) {
$this->poolname = $vals['poolname'];
}
+ if (isset($vals['numberOfBuckets'])) {
+ $this->numberOfBuckets = $vals['numberOfBuckets'];
+ }
}
}
@@ -373,6 +385,13 @@ class CompactionInfoStruct
$xfer += $input->skip($ftype);
}
break;
+ case 18:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->numberOfBuckets);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -472,6 +491,11 @@ class CompactionInfoStruct
$xfer += $output->writeString($this->poolname);
$xfer += $output->writeFieldEnd();
}
+ if ($this->numberOfBuckets !== null) {
+ $xfer += $output->writeFieldBegin('numberOfBuckets', TType::I32, 18);
+ $xfer += $output->writeI32($this->numberOfBuckets);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionRequest.php
index c6b96284d6f..8672354e151 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionRequest.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionRequest.php
@@ -75,6 +75,11 @@ class CompactionRequest
'isRequired' => false,
'type' => TType::STRING,
),
+ 10 => array(
+ 'var' => 'numberOfBuckets',
+ 'isRequired' => false,
+ 'type' => TType::I32,
+ ),
);
/**
@@ -113,6 +118,10 @@ class CompactionRequest
* @var string
*/
public $poolName = null;
+ /**
+ * @var int
+ */
+ public $numberOfBuckets = null;
public function __construct($vals = null)
{
@@ -144,6 +153,9 @@ class CompactionRequest
if (isset($vals['poolName'])) {
$this->poolName = $vals['poolName'];
}
+ if (isset($vals['numberOfBuckets'])) {
+ $this->numberOfBuckets = $vals['numberOfBuckets'];
+ }
}
}
@@ -241,6 +253,13 @@ class CompactionRequest
$xfer += $input->skip($ftype);
}
break;
+ case 10:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->numberOfBuckets);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -308,6 +327,11 @@ class CompactionRequest
$xfer += $output->writeString($this->poolName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->numberOfBuckets !== null) {
+ $xfer += $output->writeFieldBegin('numberOfBuckets', TType::I32, 10);
+ $xfer += $output->writeI32($this->numberOfBuckets);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index ec0dd0035c5..041b55dc0ca 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -15321,11 +15321,12 @@ class CompactionRequest(object):
- initiatorId
- initiatorVersion
- poolName
+ - numberOfBuckets
"""
- def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, initiatorId=None, initiatorVersion=None, poolName=None,):
+ def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, initiatorId=None, initiatorVersion=None, poolName=None, numberOfBuckets=None,):
self.dbname = dbname
self.tablename = tablename
self.partitionname = partitionname
@@ -15335,6 +15336,7 @@ class CompactionRequest(object):
self.initiatorId = initiatorId
self.initiatorVersion = initiatorVersion
self.poolName = poolName
+ self.numberOfBuckets = numberOfBuckets
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
@@ -15396,6 +15398,11 @@ class CompactionRequest(object):
self.poolName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 10:
+ if ftype == TType.I32:
+ self.numberOfBuckets = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -15446,6 +15453,10 @@ class CompactionRequest(object):
oprot.writeFieldBegin('poolName', TType.STRING, 9)
oprot.writeString(self.poolName.encode('utf-8') if sys.version_info[0] == 2 else self.poolName)
oprot.writeFieldEnd()
+ if self.numberOfBuckets is not None:
+ oprot.writeFieldBegin('numberOfBuckets', TType.I32, 10)
+ oprot.writeI32(self.numberOfBuckets)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -15490,11 +15501,12 @@ class CompactionInfoStruct(object):
- enqueueTime
- retryRetention
- poolname
+ - numberOfBuckets
"""
- def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, toomanyaborts=None, state=None, workerId=None, start=None, highestWriteId=None, errorMessage=None, hasoldabort=None, enqueueTime=None, retryRetention=None, poolname=None,):
+ def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, toomanyaborts=None, state=None, workerId=None, start=None, highestWriteId=None, errorMessage=None, hasoldabort=None, enqueueTime=None, retryRetention=None, poolname=None, numberOfBuckets=None,):
self.id = id
self.dbname = dbname
self.tablename = tablename
@@ -15512,6 +15524,7 @@ class CompactionInfoStruct(object):
self.enqueueTime = enqueueTime
self.retryRetention = retryRetention
self.poolname = poolname
+ self.numberOfBuckets = numberOfBuckets
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
@@ -15607,6 +15620,11 @@ class CompactionInfoStruct(object):
self.poolname = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 18:
+ if ftype == TType.I32:
+ self.numberOfBuckets = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -15685,6 +15703,10 @@ class CompactionInfoStruct(object):
oprot.writeFieldBegin('poolname', TType.STRING, 17)
oprot.writeString(self.poolname.encode('utf-8') if sys.version_info[0] == 2 else self.poolname)
oprot.writeFieldEnd()
+ if self.numberOfBuckets is not None:
+ oprot.writeFieldBegin('numberOfBuckets', TType.I32, 18)
+ oprot.writeI32(self.numberOfBuckets)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -31200,6 +31222,7 @@ CompactionRequest.thrift_spec = (
(7, TType.STRING, 'initiatorId', 'UTF8', None, ), # 7
(8, TType.STRING, 'initiatorVersion', 'UTF8', None, ), # 8
(9, TType.STRING, 'poolName', 'UTF8', None, ), # 9
+ (10, TType.I32, 'numberOfBuckets', None, None, ), # 10
)
all_structs.append(CompactionInfoStruct)
CompactionInfoStruct.thrift_spec = (
@@ -31221,6 +31244,7 @@ CompactionInfoStruct.thrift_spec = (
(15, TType.I64, 'enqueueTime', None, None, ), # 15
(16, TType.I64, 'retryRetention', None, None, ), # 16
(17, TType.STRING, 'poolname', 'UTF8', None, ), # 17
+ (18, TType.I32, 'numberOfBuckets', None, None, ), # 18
)
all_structs.append(OptionalCompactionInfoStruct)
OptionalCompactionInfoStruct.thrift_spec = (
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 4196d11b087..9268ab93551 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -4464,6 +4464,7 @@ class CompactionRequest
INITIATORID = 7
INITIATORVERSION = 8
POOLNAME = 9
+ NUMBEROFBUCKETS = 10
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
@@ -4474,7 +4475,8 @@ class CompactionRequest
PROPERTIES => {:type => ::Thrift::Types::MAP, :name => 'properties', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}, :optional => true},
INITIATORID => {:type => ::Thrift::Types::STRING, :name => 'initiatorId', :optional => true},
INITIATORVERSION => {:type => ::Thrift::Types::STRING, :name => 'initiatorVersion', :optional => true},
- POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', :optional => true}
+ POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', :optional => true},
+ NUMBEROFBUCKETS => {:type => ::Thrift::Types::I32, :name => 'numberOfBuckets', :optional => true}
}
def struct_fields; FIELDS; end
@@ -4510,6 +4512,7 @@ class CompactionInfoStruct
ENQUEUETIME = 15
RETRYRETENTION = 16
POOLNAME = 17
+ NUMBEROFBUCKETS = 18
FIELDS = {
ID => {:type => ::Thrift::Types::I64, :name => 'id'},
@@ -4528,7 +4531,8 @@ class CompactionInfoStruct
HASOLDABORT => {:type => ::Thrift::Types::BOOL, :name => 'hasoldabort', :optional => true},
ENQUEUETIME => {:type => ::Thrift::Types::I64, :name => 'enqueueTime', :optional => true},
RETRYRETENTION => {:type => ::Thrift::Types::I64, :name => 'retryRetention', :optional => true},
- POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolname', :optional => true}
+ POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolname', :optional => true},
+ NUMBEROFBUCKETS => {:type => ::Thrift::Types::I32, :name => 'numberOfBuckets', :optional => true}
}
def struct_fields; FIELDS; end
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 869f8f4b392..f2faf732296 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -1290,6 +1290,7 @@ struct CompactionRequest {
7: optional string initiatorId
8: optional string initiatorVersion
9: optional string poolName
+ 10: optional i32 numberOfBuckets
}
struct CompactionInfoStruct {
@@ -1310,6 +1311,7 @@ struct CompactionInfoStruct {
15: optional i64 enqueueTime,
16: optional i64 retryRetention,
17: optional string poolname
+ 18: optional i32 numberOfBuckets
}
struct OptionalCompactionInfoStruct {
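
For context, a minimal sketch of how a client could exercise the new optional field
through the generated Java bindings. Only the numberOfBuckets accessors and the three
required struct fields come from the code above; the table name, bucket count, and the
commented-out metastore client call are illustrative assumptions, not part of this diff.

    import org.apache.hadoop.hive.metastore.api.CompactionRequest;
    import org.apache.hadoop.hive.metastore.api.CompactionType;

    public class ManualBucketsSketch {
      public static void main(String[] args) {
        // dbname, tablename and type are the struct's required fields.
        CompactionRequest rqst = new CompactionRequest("default", "t", CompactionType.REBALANCE);
        rqst.setNumberOfBuckets(4);          // new optional field 10; omitted from the wire when unset
        assert rqst.isSetNumberOfBuckets();  // backed by the new __isset_bitfield bit
        // client.compact2(rqst);            // hypothetical IMetaStoreClient call
      }
    }

Because the field is optional, wire compatibility is preserved: older readers skip it
and older writers never emit it.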
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
index 6e2a0a213d4..a5c6b05a3df 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
@@ -63,6 +63,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
public long txnId = 0;
public long commitTime = 0;
public String poolName;
+ public int numberOfBuckets = 0;
/**
* The highest write id that the compaction job will pay attention to.
@@ -154,6 +155,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
"state:" + state + "," +
"type:" + type + "," +
"enqueueTime:" + enqueueTime + "," +
+ "commitTime:" + commitTime + "," +
"start:" + start + "," +
"properties:" + properties + "," +
"runAs:" + runAs + "," +
@@ -162,8 +164,14 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
"highestWriteId:" + highestWriteId + "," +
"errorMessage:" + errorMessage + "," +
"workerId: " + workerId + "," +
+ "workerVersion: " + workerVersion + "," +
"initiatorId: " + initiatorId + "," +
- "retryRetention" + retryRetention;
+ "initiatorVersion: " + initiatorVersion + "," +
+ "retryRetention" + retryRetention + "," +
+ "txnId" + txnId + "," +
+ "nextTxnId" + nextTxnId + "," +
+ "poolname" + poolName + "," +
+ "numberOfBuckets" + numberOfBuckets;
}
@Override
@@ -215,6 +223,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
fullCi.txnId = rs.getLong(21);
fullCi.commitTime = rs.getLong(22);
fullCi.poolName = rs.getString(23);
+ fullCi.numberOfBuckets = rs.getInt(24);
return fullCi;
}
static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionInfo ci, long endTime) throws SQLException, MetaException {
@@ -241,6 +250,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
pStmt.setLong(21, ci.txnId);
pStmt.setLong(22, ci.commitTime);
pStmt.setString(23, ci.poolName);
+ pStmt.setInt(24, ci.numberOfBuckets);
}
public static CompactionInfo compactionStructToInfo(CompactionInfoStruct cr) {
@@ -278,9 +288,12 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
if (cr.isSetRetryRetention()) {
ci.retryRetention = cr.getRetryRetention();
}
- if(cr.isSetPoolname()) {
+ if (cr.isSetPoolname()) {
ci.poolName = cr.getPoolname();
}
+ if (cr.isSetNumberOfBuckets()) {
+ ci.numberOfBuckets = cr.getNumberOfBuckets();
+ }
return ci;
}
@@ -302,6 +315,7 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
cr.setEnqueueTime(ci.enqueueTime);
cr.setRetryRetention(ci.retryRetention);
cr.setPoolname(ci.poolName);
+ cr.setNumberOfBuckets(ci.numberOfBuckets);
return cr;
}
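
A hedged round-trip illustration of the converter change above: when the struct carries
the field, compactionStructToInfo copies it onto CompactionInfo; when it is unset, the
CompactionInfo default of 0 is kept. The constructor argument order is assumed from the
struct's required fields, and the values are placeholders.

    import org.apache.hadoop.hive.metastore.api.CompactionInfoStruct;
    import org.apache.hadoop.hive.metastore.api.CompactionType;
    import org.apache.hadoop.hive.metastore.txn.CompactionInfo;

    public class StructToInfoSketch {
      public static void main(String[] args) {
        CompactionInfoStruct cis =
            new CompactionInfoStruct(1L, "default", "t", CompactionType.REBALANCE);
        cis.setNumberOfBuckets(4);
        CompactionInfo ci = CompactionInfo.compactionStructToInfo(cis);
        System.out.println(ci.numberOfBuckets);  // 4; stays 0 if the struct field is unset
      }
    }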
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 96f9365038b..59350e32a3a 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -66,8 +66,9 @@ class CompactionTxnHandler extends TxnHandler {
+ "\"CQ_STATE\", \"CQ_TYPE\", \"CQ_TBLPROPERTIES\", \"CQ_WORKER_ID\", \"CQ_START\", \"CQ_RUN_AS\", "
+ "\"CQ_HIGHEST_WRITE_ID\", \"CQ_META_INFO\", \"CQ_HADOOP_JOB_ID\", \"CQ_ERROR_MESSAGE\", "
+ "\"CQ_ENQUEUE_TIME\", \"CQ_WORKER_VERSION\", \"CQ_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\", "
- + "\"CQ_RETRY_RETENTION\", \"CQ_NEXT_TXN_ID\", \"CQ_TXN_ID\", \"CQ_COMMIT_TIME\", \"CQ_POOL_NAME\""
- + " FROM \"COMPACTION_QUEUE\" WHERE \"CQ_TXN_ID\" = ?";
+ + "\"CQ_RETRY_RETENTION\", \"CQ_NEXT_TXN_ID\", \"CQ_TXN_ID\", \"CQ_COMMIT_TIME\", \"CQ_POOL_NAME\", "
+ + "\"CQ_NUMBER_OF_BUCKETS\" "
+ + "FROM \"COMPACTION_QUEUE\" WHERE \"CQ_TXN_ID\" = ?";
private static final String SELECT_COMPACTION_METRICS_CACHE_QUERY =
"SELECT \"CMC_METRIC_VALUE\", \"CMC_VERSION\" FROM \"COMPACTION_METRICS_CACHE\" " +
"WHERE \"CMC_DATABASE\" = ? AND \"CMC_TABLE\" = ? AND \"CMC_METRIC_TYPE\" = ?";
@@ -246,7 +247,8 @@ class CompactionTxnHandler extends TxnHandler {
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED, connPoolCompaction);
StringBuilder sb = new StringBuilder();
sb.append("SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " +
- "\"CQ_TYPE\", \"CQ_POOL_NAME\", \"CQ_TBLPROPERTIES\" FROM \"COMPACTION_QUEUE\" WHERE \"CQ_STATE\" = '" + INITIATED_STATE + "' AND ");
+ "\"CQ_TYPE\", \"CQ_POOL_NAME\", \"CQ_NUMBER_OF_BUCKETS\", \"CQ_TBLPROPERTIES\" FROM \"COMPACTION_QUEUE\" " +
+ "WHERE \"CQ_STATE\" = '" + INITIATED_STATE + "' AND ");
boolean hasPoolName = StringUtils.isNotBlank(rqst.getPoolName());
if(hasPoolName) {
sb.append("\"CQ_POOL_NAME\"=?");
@@ -276,7 +278,8 @@ class CompactionTxnHandler extends TxnHandler {
info.partName = rs.getString(4);
info.type = TxnUtils.dbCompactionType2ThriftType(rs.getString(5).charAt(0));
info.poolName = rs.getString(6);
- info.properties = rs.getString(7);
+ info.numberOfBuckets = rs.getInt(7);
+ info.properties = rs.getString(8);
info.workerId = rqst.getWorkerId();
String workerId = rqst.getWorkerId();
@@ -553,13 +556,13 @@ class CompactionTxnHandler extends TxnHandler {
+ "\"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HIGHEST_WRITE_ID\", \"CC_META_INFO\", "
+ "\"CC_HADOOP_JOB_ID\", \"CC_ERROR_MESSAGE\", \"CC_ENQUEUE_TIME\", "
+ "\"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", "
- + "\"CC_NEXT_TXN_ID\", \"CC_TXN_ID\", \"CC_COMMIT_TIME\") "
+ + "\"CC_NEXT_TXN_ID\", \"CC_TXN_ID\", \"CC_COMMIT_TIME\", \"CC_POOL_NAME\", \"CC_NUMBER_OF_BUCKETS\") "
+ "SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", "
+ quoteChar(SUCCEEDED_STATE) + ", \"CQ_TYPE\", \"CQ_TBLPROPERTIES\", \"CQ_WORKER_ID\", \"CQ_START\", "
+ getEpochFn(dbProduct) + ", \"CQ_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\", \"CQ_META_INFO\", "
+ "\"CQ_HADOOP_JOB_ID\", \"CQ_ERROR_MESSAGE\", \"CQ_ENQUEUE_TIME\", "
+ "\"CQ_WORKER_VERSION\", \"CQ_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\", "
- + "\"CQ_NEXT_TXN_ID\", \"CQ_TXN_ID\", \"CQ_COMMIT_TIME\" "
+ + "\"CQ_NEXT_TXN_ID\", \"CQ_TXN_ID\", \"CQ_COMMIT_TIME\", \"CQ_POOL_NAME\", \"CQ_NUMBER_OF_BUCKETS\" "
+ "FROM \"COMPACTION_QUEUE\" WHERE \"CQ_ID\" = ?";
pStmt = dbConn.prepareStatement(s);
pStmt.setLong(1, info.id);
@@ -1379,8 +1382,8 @@ class CompactionTxnHandler extends TxnHandler {
+ "\"CQ_STATE\", \"CQ_TYPE\", \"CQ_TBLPROPERTIES\", \"CQ_WORKER_ID\", \"CQ_START\", \"CQ_RUN_AS\", "
+ "\"CQ_HIGHEST_WRITE_ID\", \"CQ_META_INFO\", \"CQ_HADOOP_JOB_ID\", \"CQ_ERROR_MESSAGE\", "
+ "\"CQ_ENQUEUE_TIME\", \"CQ_WORKER_VERSION\", \"CQ_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\", "
- + "\"CQ_RETRY_RETENTION\", \"CQ_NEXT_TXN_ID\", \"CQ_TXN_ID\", \"CQ_COMMIT_TIME\", \"CQ_POOL_NAME\" "
- + "FROM \"COMPACTION_QUEUE\" WHERE \"CQ_ID\" = ?");
+ + "\"CQ_RETRY_RETENTION\", \"CQ_NEXT_TXN_ID\", \"CQ_TXN_ID\", \"CQ_COMMIT_TIME\", \"CQ_POOL_NAME\", "
+ + "\"CQ_NUMBER_OF_BUCKETS\" FROM \"COMPACTION_QUEUE\" WHERE \"CQ_ID\" = ?");
pStmt.setLong(1, ci.id);
rs = pStmt.executeQuery();
if (rs.next()) {
@@ -1423,8 +1426,8 @@ class CompactionTxnHandler extends TxnHandler {
+ "\"CC_TBLPROPERTIES\", \"CC_WORKER_ID\", \"CC_START\", \"CC_END\", \"CC_RUN_AS\", "
+ "\"CC_HIGHEST_WRITE_ID\", \"CC_META_INFO\", \"CC_HADOOP_JOB_ID\", \"CC_ERROR_MESSAGE\", "
+ "\"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\","
- + "\"CC_NEXT_TXN_ID\", \"CC_TXN_ID\", \"CC_COMMIT_TIME\", \"CC_POOL_NAME\") "
- + "VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?,?,?,?,?,?,?,?,?)");
+ + "\"CC_NEXT_TXN_ID\", \"CC_TXN_ID\", \"CC_COMMIT_TIME\", \"CC_POOL_NAME\", \"CC_NUMBER_OF_BUCKETS\") "
+ + "VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?,?,?,?,?,?,?,?,?,?)");
CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn));
int updCount = pStmt.executeUpdate();
LOG.debug("Inserted {} entries into COMPLETED_COMPACTIONS", updCount);
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 73858046f62..77863e068ee 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -3733,6 +3733,9 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
String partName = rqst.getPartitionname();
if (partName != null) buf.append("\"CQ_PARTITION\", ");
buf.append("\"CQ_STATE\", \"CQ_TYPE\", \"CQ_ENQUEUE_TIME\", \"CQ_POOL_NAME\"");
+ if (rqst.isSetNumberOfBuckets()) {
+ buf.append(", \"CQ_NUMBER_OF_BUCKETS\"");
+ }
if (rqst.getProperties() != null) {
buf.append(", \"CQ_TBLPROPERTIES\"");
}
@@ -3765,6 +3768,9 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
buf.append(getEpochFn(dbProduct));
buf.append(", ?");
params.add(rqst.getPoolName());
+ if (rqst.isSetNumberOfBuckets()) {
+ buf.append(", ").append(rqst.getNumberOfBuckets());
+ }
if (rqst.getProperties() != null) {
buf.append(", ?");
params.add(new StringableMap(rqst.getProperties()).toString());
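
To make the dynamic SQL above concrete, a hedged sketch of the fragments the handler
appends when a bucket count is present. Leading columns and the DB-specific epoch
expression are simplified (EPOCH_FN stands in for getEpochFn(dbProduct)); note that the
bucket count is concatenated as an integer literal rather than bound as a parameter.

    import org.apache.hadoop.hive.metastore.api.CompactionRequest;

    public class EnqueueSqlShape {
      static String sketch(CompactionRequest rqst) {
        StringBuilder buf = new StringBuilder(
            "INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", "
            + "\"CQ_STATE\", \"CQ_TYPE\", \"CQ_ENQUEUE_TIME\", \"CQ_POOL_NAME\"");
        if (rqst.isSetNumberOfBuckets()) {
          buf.append(", \"CQ_NUMBER_OF_BUCKETS\"");
        }
        buf.append(") VALUES (?, ?, ?, ?, ?, EPOCH_FN, ?");
        if (rqst.isSetNumberOfBuckets()) {
          buf.append(", ").append(rqst.getNumberOfBuckets());  // inlined literal
        }
        return buf.append(")").toString();
      }
    }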
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index fb85ad96582..2593e64c635 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -635,7 +635,8 @@ CREATE TABLE COMPACTION_QUEUE (
CQ_WORKER_VERSION varchar(128),
CQ_CLEANER_START bigint,
CQ_RETRY_RETENTION bigint NOT NULL DEFAULT 0,
- CQ_POOL_NAME varchar(128)
+ CQ_POOL_NAME varchar(128),
+ CQ_NUMBER_OF_BUCKETS integer
);
CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
@@ -666,7 +667,8 @@ CREATE TABLE COMPLETED_COMPACTIONS (
CC_INITIATOR_ID varchar(128),
CC_INITIATOR_VERSION varchar(128),
CC_WORKER_VERSION varchar(128),
- CC_POOL_NAME varchar(128)
+ CC_POOL_NAME varchar(128),
+ CC_NUMBER_OF_BUCKETS integer
);
CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION);
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-alpha-2-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-alpha-2-to-4.0.0.derby.sql
index 61b9bfd8021..9e46203c45f 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-alpha-2-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-alpha-2-to-4.0.0.derby.sql
@@ -4,5 +4,9 @@
ALTER TABLE "APP"."TAB_COL_STATS" ADD HISTOGRAM BLOB;
ALTER TABLE "APP"."PART_COL_STATS" ADD HISTOGRAM BLOB;
+-- HIVE-26719
+ALTER TABLE COMPACTION_QUEUE ADD CQ_NUMBER_OF_BUCKETS INTEGER;
+ALTER TABLE COMPLETED_COMPACTIONS ADD CC_NUMBER_OF_BUCKETS INTEGER;
+
-- This needs to be the last thing done. Insert any changes above this line.
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
\ No newline at end of file
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index d62fd216061..239f0f1f56d 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -1055,6 +1055,7 @@ CREATE TABLE COMPACTION_QUEUE(
CQ_CLEANER_START bigint NULL,
CQ_RETRY_RETENTION bigint NOT NULL DEFAULT 0,
CQ_POOL_NAME nvarchar(128) NULL,
+ CQ_NUMBER_OF_BUCKETS integer,
PRIMARY KEY CLUSTERED
(
CQ_ID ASC
@@ -1085,6 +1086,7 @@ CREATE TABLE COMPLETED_COMPACTIONS (
CC_INITIATOR_VERSION nvarchar(128) NULL,
CC_WORKER_VERSION nvarchar(128) NULL,
CC_POOL_NAME nvarchar(128) NULL,
+ CC_NUMBER_OF_BUCKETS integer,
PRIMARY KEY CLUSTERED
(
CC_ID ASC
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-alpha-2-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-alpha-2-to-4.0.0.mssql.sql
index 88f7592639e..16aaca19269 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-alpha-2-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-alpha-2-to-4.0.0.mssql.sql
@@ -4,6 +4,10 @@ SELECT 'Upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0' AS MESSAGE;
ALTER TABLE TAB_COL_STATS ADD HISTOGRAM varbinary(max);
ALTER TABLE PART_COL_STATS ADD HISTOGRAM varbinary(max);
+-- HIVE-26719
+ALTER TABLE COMPACTION_QUEUE ADD CQ_NUMBER_OF_BUCKETS INTEGER NULL;
+ALTER TABLE COMPLETED_COMPACTIONS ADD CC_NUMBER_OF_BUCKETS INTEGER NULL;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0' AS MESSAGE;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index 1546523b6b2..c38b2c072d0 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -1096,7 +1096,8 @@ CREATE TABLE COMPACTION_QUEUE (
CQ_WORKER_VERSION varchar(128),
CQ_CLEANER_START bigint,
CQ_RETRY_RETENTION bigint NOT NULL DEFAULT 0,
- CQ_POOL_NAME varchar(128)
+ CQ_POOL_NAME varchar(128),
+ CQ_NUMBER_OF_BUCKETS integer
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE TABLE COMPLETED_COMPACTIONS (
@@ -1122,7 +1123,8 @@ CREATE TABLE COMPLETED_COMPACTIONS (
CC_INITIATOR_ID varchar(128),
CC_INITIATOR_VERSION varchar(128),
CC_WORKER_VERSION varchar(128),
- CC_POOL_NAME varchar(128)
+ CC_POOL_NAME varchar(128),
+ CC_NUMBER_OF_BUCKETS integer
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION);
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-2-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-2-to-4.0.0.mysql.sql
index 8378a90a829..467e0a00a27 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-2-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-2-to-4.0.0.mysql.sql
@@ -4,6 +4,10 @@ SELECT 'Upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0' AS MESSAGE;
ALTER TABLE TAB_COL_STATS ADD HISTOGRAM blob;
ALTER TABLE PART_COL_STATS ADD HISTOGRAM blob;
+-- HIVE-26719
+ALTER TABLE `COMPACTION_QUEUE` ADD COLUMN `CQ_NUMBER_OF_BUCKETS` INTEGER;
+ALTER TABLE `COMPLETED_COMPACTIONS` ADD COLUMN `CC_NUMBER_OF_BUCKETS` INTEGER;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0' AS MESSAGE;
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index 2c56fbecebb..4556d2ddf5a 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -1098,7 +1098,8 @@ CREATE TABLE COMPACTION_QUEUE (
CQ_WORKER_VERSION varchar(128),
CQ_CLEANER_START NUMBER(19),
CQ_RETRY_RETENTION NUMBER(19) DEFAULT 0 NOT NULL,
- CQ_POOL_NAME varchar(128)
+ CQ_POOL_NAME varchar(128),
+ CQ_NUMBER_OF_BUCKETS integer
) ROWDEPENDENCIES;
CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
@@ -1129,7 +1130,8 @@ CREATE TABLE COMPLETED_COMPACTIONS (
CC_INITIATOR_ID varchar(128),
CC_INITIATOR_VERSION varchar(128),
CC_WORKER_VERSION varchar(128),
- CC_POOL_NAME varchar(128)
+ CC_POOL_NAME varchar(128),
+ CC_NUMBER_OF_BUCKETS integer
) ROWDEPENDENCIES;
CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION);
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-alpha-2-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-alpha-2-to-4.0.0.oracle.sql
index 0c3611b7470..acbce4d9bc2 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-alpha-2-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-alpha-2-to-4.0.0.oracle.sql
@@ -4,6 +4,10 @@ SELECT 'Upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0' AS Status from d
ALTER TABLE TAB_COL_STATS ADD HISTOGRAM BLOB;
ALTER TABLE PART_COL_STATS ADD HISTOGRAM BLOB;
+-- HIVE-26719
+ALTER TABLE COMPACTION_QUEUE ADD CQ_NUMBER_OF_BUCKETS INTEGER;
+ALTER TABLE COMPLETED_COMPACTIONS ADD CC_NUMBER_OF_BUCKETS INTEGER;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0' AS Status from dual;
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index abc4633a2ec..c90b471387c 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -1813,7 +1813,8 @@ CREATE TABLE "COMPACTION_QUEUE" (
"CQ_WORKER_VERSION" varchar(128),
"CQ_CLEANER_START" bigint,
"CQ_RETRY_RETENTION" bigint not null default 0,
- "CQ_POOL_NAME" varchar(128)
+ "CQ_POOL_NAME" varchar(128),
+ "CQ_NUMBER_OF_BUCKETS" integer
);
CREATE TABLE "NEXT_COMPACTION_QUEUE_ID" (
@@ -1844,7 +1845,8 @@ CREATE TABLE "COMPLETED_COMPACTIONS" (
"CC_INITIATOR_ID" varchar(128),
"CC_INITIATOR_VERSION" varchar(128),
"CC_WORKER_VERSION" varchar(128),
- "CC_POOL_NAME" varchar(128)
+ "CC_POOL_NAME" varchar(128),
+ "CC_NUMBER_OF_BUCKETS" integer
);
CREATE INDEX "COMPLETED_COMPACTIONS_RES" ON "COMPLETED_COMPACTIONS" ("CC_DATABASE","CC_TABLE","CC_PARTITION");
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-alpha-2-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-alpha-2-to-4.0.0.postgres.sql
index 8631662888e..77ec0c54961 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-alpha-2-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-alpha-2-to-4.0.0.postgres.sql
@@ -4,6 +4,10 @@ SELECT 'Upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0';
ALTER TABLE "TAB_COL_STATS" ADD "HISTOGRAM" bytea;
ALTER TABLE "PART_COL_STATS" ADD "HISTOGRAM" bytea;
+-- HIVE-26719
+ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_NUMBER_OF_BUCKETS" INTEGER;
+ALTER TABLE "COMPLETED_COMPACTIONS" ADD "CQ_NUMBER_OF_BUCKETS" INTEGER;
+
-- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
SELECT 'Finished upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0';
diff --git a/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql
index cf0b82a1f12..255af39873d 100644
--- a/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql
@@ -143,6 +143,10 @@ ALTER TABLE "COMPLETED_COMPACTIONS" ADD "CC_COMMIT_TIME" bigint;
ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_POOL_NAME" VARCHAR(128);
ALTER TABLE "COMPLETED_COMPACTIONS" ADD "CC_POOL_NAME" VARCHAR(128);
+-- HIVE-26719
+ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_NUMBER_OF_BUCKETS" INTEGER;
+ALTER TABLE "COMPLETED_COMPACTIONS" ADD "CQ_NUMBER_OF_BUCKETS" INTEGER;
+
-- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.3000 to 4.0.0';