You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by kg...@apache.org on 2020/04/22 08:41:08 UTC
[hive] branch master updated (8b9fadb -> c891fc5)
This is an automated email from the ASF dual-hosted git repository.
kgyrtkirk pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git.
from 8b9fadb HIVE-23169 : Probe runtime support for LLAP (Panagiotis Garefalakis via Ashutosh Chauhan)
new 998be10 HIVE-23246: Reduce MiniDruidCluster memory requirements (Zoltan Haindrich reviewed by Peter Vary)
new 1186843 HIVE-23249: Prevent infinite loop in TestJdbcWithMiniLlapArrow (Zoltan Haindrich reviewed by Peter Vary)
new 4ef051c HIVE-23250: Scheduled query related qtests may not finish before it's expected (Zoltan Haindrich reviewed by Peter Vary)
new c891fc5 HIVE-23251: Provide a way to have only a selection of datasets loaded (Zoltan Haindrich reviewed by László Bodor)
The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails. The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
Summary of changes:
.../hive/jdbc/TestJdbcWithMiniLlapArrow.java | 3 +-
.../org/apache/hive/druid/MiniDruidCluster.java | 20 +-
.../hive/ql/dataset/QTestDatasetHandler.java | 72 ++++--
.../hive/ql/schq/TestScheduledQueryStatements.java | 5 +-
.../test/queries/clientpositive/authorization_9.q | 1 +
ql/src/test/queries/clientpositive/schq_analyze.q | 2 +-
ql/src/test/queries/clientpositive/schq_ingest.q | 2 +-
.../queries/clientpositive/schq_materialized.q | 2 +-
ql/src/test/queries/clientpositive/sysdb.q | 2 +-
.../test/results/clientpositive/llap/sysdb.q.out | 254 +++------------------
10 files changed, 107 insertions(+), 256 deletions(-)
[hive] 03/04: HIVE-23250: Scheduled query related qtests may not
finish before it's expected (Zoltan Haindrich reviewed by Peter Vary)
Posted by kg...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
commit 4ef051c8f6a654cc78352a1250b2d80410fa2b37
Author: Zoltan Haindrich <ki...@rxd.hu>
AuthorDate: Wed Apr 22 08:11:15 2020 +0000
HIVE-23250: Scheduled query related qtests may not finish before it's expected (Zoltan Haindrich reviewed by Peter Vary)
Signed-off-by: Zoltan Haindrich <zh...@cloudera.com>
---
.../org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java | 5 ++++-
ql/src/test/queries/clientpositive/schq_analyze.q | 2 +-
ql/src/test/queries/clientpositive/schq_ingest.q | 2 +-
ql/src/test/queries/clientpositive/schq_materialized.q | 2 +-
4 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java b/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
index f2fc421..4f7990f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
@@ -205,6 +205,9 @@ public class TestScheduledQueryStatements {
@Test
public void testExecuteImmediate() throws ParseException, Exception {
+ // use a different namespace because the schq executor might be able to
+ // catch the new schq execution immediately
+ env_setup.getTestCtx().hiveConf.setVar(ConfVars.HIVE_SCHEDULED_QUERIES_NAMESPACE, "immed");
IDriver driver = createDriver();
driver.run("set role admin");
@@ -213,7 +216,7 @@ public class TestScheduledQueryStatements {
driver.run("alter scheduled query immed execute");
try (CloseableObjectStore os = new CloseableObjectStore(env_setup.getTestCtx().hiveConf)) {
- Optional<MScheduledQuery> sq = os.getMScheduledQuery(new ScheduledQueryKey("immed", "hive"));
+ Optional<MScheduledQuery> sq = os.getMScheduledQuery(new ScheduledQueryKey("immed", "immed"));
assertTrue(sq.isPresent());
assertThat(sq.get().getNextExecution(), Matchers.lessThanOrEqualTo((int) (System.currentTimeMillis() / 1000)));
int cnt1 = ScheduledQueryExecutionService.getForcedScheduleCheckCount();
diff --git a/ql/src/test/queries/clientpositive/schq_analyze.q b/ql/src/test/queries/clientpositive/schq_analyze.q
index 3c03360..246a215 100644
--- a/ql/src/test/queries/clientpositive/schq_analyze.q
+++ b/ql/src/test/queries/clientpositive/schq_analyze.q
@@ -21,7 +21,7 @@ create scheduled query t_analyze cron '0 */1 * * * ? *' as analyze table t compu
alter scheduled query t_analyze execute;
-!sleep 10;
+!sleep 30;
select * from information_schema.scheduled_executions s where schedule_name='ex_analyze' order by scheduled_execution_id desc limit 3;
diff --git a/ql/src/test/queries/clientpositive/schq_ingest.q b/ql/src/test/queries/clientpositive/schq_ingest.q
index b7bc90c..8ffc722 100644
--- a/ql/src/test/queries/clientpositive/schq_ingest.q
+++ b/ql/src/test/queries/clientpositive/schq_ingest.q
@@ -39,7 +39,7 @@ insert into s values(2,2),(3,3);
-- pretend that a timeout have happened
alter scheduled query ingest execute;
-!sleep 10;
+!sleep 30;
select state,error_message from sys.scheduled_executions;
select * from t order by id;
diff --git a/ql/src/test/queries/clientpositive/schq_materialized.q b/ql/src/test/queries/clientpositive/schq_materialized.q
index 7242f3e..46b725e 100644
--- a/ql/src/test/queries/clientpositive/schq_materialized.q
+++ b/ql/src/test/queries/clientpositive/schq_materialized.q
@@ -68,7 +68,7 @@ select `(NEXT_EXECUTION|SCHEDULED_QUERY_ID)?+.+` from sys.scheduled_queries;
alter scheduled query d execute;
-!sleep 10;
+!sleep 30;
-- the scheduled execution will fail - because of missing TXN; but overall it works..
select state,error_message from sys.scheduled_executions;
[hive] 02/04: HIVE-23249: Prevent infinite loop in
TestJdbcWithMiniLlapArrow (Zoltan Haindrich reviewed by Peter Vary)
Posted by kg...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
commit 1186843bbbaa079c22103198f1ac36edfd1fd9a1
Author: Zoltan Haindrich <ki...@rxd.hu>
AuthorDate: Wed Apr 22 08:06:53 2020 +0000
HIVE-23249: Prevent infinite loop in TestJdbcWithMiniLlapArrow (Zoltan Haindrich reviewed by Peter Vary)
Signed-off-by: Zoltan Haindrich <zh...@cloudera.com>
---
.../src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
index 1aab03d..bc2480a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
@@ -358,7 +358,7 @@ public class TestJdbcWithMiniLlapArrow extends BaseJdbcWithMiniLlap {
// wait for other thread to create the stmt handle
int count = 0;
- while (count < 10) {
+ while (++count <= 10) {
try {
tKillHolder.throwable = null;
Thread.sleep(2000);
@@ -380,7 +380,6 @@ public class TestJdbcWithMiniLlapArrow extends BaseJdbcWithMiniLlap {
stmt2.close();
break;
} catch (SQLException e) {
- count++;
LOG.warn("Exception when kill query", e);
tKillHolder.throwable = e;
}
[hive] 04/04: HIVE-23251: Provide a way to have only a selection of datasets loaded (Zoltan Haindrich reviewed by László Bodor)
Posted by kg...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
commit c891fc594ddbd73994381c629cb5ca67555f7332
Author: Zoltan Haindrich <ki...@rxd.hu>
AuthorDate: Wed Apr 22 08:21:35 2020 +0000
HIVE-23251: Provide a way to have only a selection of datasets loaded (Zoltan Haindrich reviewed by László Bodor)
Signed-off-by: Zoltan Haindrich <zh...@cloudera.com>
---
.../hive/ql/dataset/QTestDatasetHandler.java | 72 ++++--
.../test/queries/clientpositive/authorization_9.q | 1 +
ql/src/test/queries/clientpositive/sysdb.q | 2 +-
.../test/results/clientpositive/llap/sysdb.q.out | 254 +++------------------
4 files changed, 90 insertions(+), 239 deletions(-)
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/dataset/QTestDatasetHandler.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/dataset/QTestDatasetHandler.java
index 85ece49..24748fc 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/dataset/QTestDatasetHandler.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/dataset/QTestDatasetHandler.java
@@ -42,6 +42,7 @@ import org.slf4j.LoggerFactory;
*
* <pre>
* --! qt:dataset:sample
+ * --! qt:dataset:sample:ONLY
* </pre>
*
* will make sure that the dataset named sample is loaded prior to executing the test.
@@ -52,6 +53,7 @@ public class QTestDatasetHandler implements QTestOptionHandler {
private File datasetDir;
private static Set<String> srcTables;
private static Set<String> missingTables = new HashSet<>();
+ Set<String> tablesToUnload = new HashSet<>();
public QTestDatasetHandler(HiveConf conf) {
// Use path relative to dataDir directory if it is not specified
@@ -90,6 +92,17 @@ public class QTestDatasetHandler implements QTestOptionHandler {
return true;
}
+ public boolean unloadDataset(String table, CliDriver cliDriver) throws Exception {
+ try {
+ CommandProcessorResponse result = cliDriver.processLine("drop table " + table);
+ LOG.info("Result from cliDrriver.processLine in initFromDatasets=" + result);
+ } catch (CommandProcessorException e) {
+ Assert.fail("Failed during initFromDatasets processLine with code=" + e);
+ }
+
+ return true;
+ }
+
public static Set<String> getSrcTables() {
if (srcTables == null) {
initSrcTables();
@@ -102,6 +115,11 @@ public class QTestDatasetHandler implements QTestOptionHandler {
storeSrcTables();
}
+ private void removeSrcTable(String table) {
+ srcTables.remove(table);
+ storeSrcTables();
+ }
+
public static Set<String> initSrcTables() {
if (srcTables == null) {
initSrcTablesFromSystemProperty();
@@ -133,33 +151,53 @@ public class QTestDatasetHandler implements QTestOptionHandler {
@Override
public void processArguments(String arguments) {
- String[] tables = arguments.split(",");
+ String[] args = arguments.split(":");
+ Set<String> tableNames = getTableNames(args[0]);
synchronized (QTestUtil.class) {
- for (String string : tables) {
- string = string.trim();
- if (string.length() == 0) {
- continue;
- }
- if (srcTables == null || !srcTables.contains(string)) {
- missingTables.add(string);
+ if (args.length > 1) {
+ if (args.length > 2 || !args[1].equalsIgnoreCase("ONLY")) {
+ throw new RuntimeException("unknown option: " + args[1]);
}
+ tablesToUnload.addAll(getSrcTables());
+ tablesToUnload.removeAll(tableNames);
}
+ tableNames.removeAll(getSrcTables());
+ missingTables.addAll(tableNames);
}
}
+ private Set<String> getTableNames(String arguments) {
+ Set<String> ret = new HashSet<String>();
+ String[] tables = arguments.split(",");
+ for (String string : tables) {
+ string = string.trim();
+ if (string.length() == 0) {
+ continue;
+ }
+ ret.add(string);
+ }
+ return ret;
+ }
+
@Override
public void beforeTest(QTestUtil qt) throws Exception {
- if (!missingTables.isEmpty()) {
- synchronized (QTestUtil.class) {
- qt.newSession(true);
- for (String table : missingTables) {
- if (initDataset(table, qt.getCliDriver())) {
- addSrcTable(table);
- }
+ if (missingTables.isEmpty() && tablesToUnload.isEmpty()) {
+ return;
+ }
+ synchronized (QTestUtil.class) {
+ qt.newSession(true);
+ for (String table : missingTables) {
+ if (initDataset(table, qt.getCliDriver())) {
+ addSrcTable(table);
}
- missingTables.clear();
- qt.newSession(true);
}
+ for (String table : tablesToUnload) {
+ removeSrcTable(table);
+ unloadDataset(table, qt.getCliDriver());
+ }
+ missingTables.clear();
+ tablesToUnload.clear();
+ qt.newSession(true);
}
}
diff --git a/ql/src/test/queries/clientpositive/authorization_9.q b/ql/src/test/queries/clientpositive/authorization_9.q
index 40b5e86..9075983 100644
--- a/ql/src/test/queries/clientpositive/authorization_9.q
+++ b/ql/src/test/queries/clientpositive/authorization_9.q
@@ -1,3 +1,4 @@
+--! qt:dataset::ONLY
set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
-- SORT_BEFORE_DIFF
diff --git a/ql/src/test/queries/clientpositive/sysdb.q b/ql/src/test/queries/clientpositive/sysdb.q
index 5936954..2463ce6 100644
--- a/ql/src/test/queries/clientpositive/sysdb.q
+++ b/ql/src/test/queries/clientpositive/sysdb.q
@@ -1,4 +1,4 @@
---! qt:dataset:alltypesorc,alltypesparquet,part,src,src1,srcbucket,srcbucket2,src_cbo,src_json,src_sequencefile,src_thrift,srcpart,cbo_t1,cbo_t2,cbo_t3,lineitem
+--! qt:dataset:src,part,srcbucket:ONLY
--! qt:sysdb
set hive.strict.checks.cartesian.product=false;
diff --git a/ql/src/test/results/clientpositive/llap/sysdb.q.out b/ql/src/test/results/clientpositive/llap/sysdb.q.out
index 8b0be82..619c0d2 100644
--- a/ql/src/test/results/clientpositive/llap/sysdb.q.out
+++ b/ql/src/test/results/clientpositive/llap/sysdb.q.out
@@ -72,30 +72,6 @@ PREHOOK: query: show grant user hive_test_user
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user hive_test_user
POSTHOOK: type: SHOW_GRANT
-default alltypesorc hive_test_user USER DELETE true -1 hive_test_user
-default alltypesorc hive_test_user USER INSERT true -1 hive_test_user
-default alltypesorc hive_test_user USER SELECT true -1 hive_test_user
-default alltypesorc hive_test_user USER UPDATE true -1 hive_test_user
-default alltypesparquet hive_test_user USER DELETE true -1 hive_test_user
-default alltypesparquet hive_test_user USER INSERT true -1 hive_test_user
-default alltypesparquet hive_test_user USER SELECT true -1 hive_test_user
-default alltypesparquet hive_test_user USER UPDATE true -1 hive_test_user
-default cbo_t1 hive_test_user USER DELETE true -1 hive_test_user
-default cbo_t1 hive_test_user USER INSERT true -1 hive_test_user
-default cbo_t1 hive_test_user USER SELECT true -1 hive_test_user
-default cbo_t1 hive_test_user USER UPDATE true -1 hive_test_user
-default cbo_t2 hive_test_user USER DELETE true -1 hive_test_user
-default cbo_t2 hive_test_user USER INSERT true -1 hive_test_user
-default cbo_t2 hive_test_user USER SELECT true -1 hive_test_user
-default cbo_t2 hive_test_user USER UPDATE true -1 hive_test_user
-default cbo_t3 hive_test_user USER DELETE true -1 hive_test_user
-default cbo_t3 hive_test_user USER INSERT true -1 hive_test_user
-default cbo_t3 hive_test_user USER SELECT true -1 hive_test_user
-default cbo_t3 hive_test_user USER UPDATE true -1 hive_test_user
-default lineitem hive_test_user USER DELETE true -1 hive_test_user
-default lineitem hive_test_user USER INSERT true -1 hive_test_user
-default lineitem hive_test_user USER SELECT true -1 hive_test_user
-default lineitem hive_test_user USER UPDATE true -1 hive_test_user
default moretypes hive_test_user USER DELETE true -1 hive_test_user
default moretypes hive_test_user USER INSERT true -1 hive_test_user
default moretypes hive_test_user USER SELECT true -1 hive_test_user
@@ -116,46 +92,18 @@ default src hive_test_user USER DELETE true -1 hive_test_user
default src hive_test_user USER INSERT true -1 hive_test_user
default src hive_test_user USER SELECT true -1 hive_test_user
default src hive_test_user USER UPDATE true -1 hive_test_user
-default src1 hive_test_user USER DELETE true -1 hive_test_user
-default src1 hive_test_user USER INSERT true -1 hive_test_user
-default src1 hive_test_user USER SELECT true -1 hive_test_user
-default src1 hive_test_user USER UPDATE true -1 hive_test_user
default src_buck hive_test_user USER DELETE true -1 hive_test_user
default src_buck hive_test_user USER INSERT true -1 hive_test_user
default src_buck hive_test_user USER SELECT true -1 hive_test_user
default src_buck hive_test_user USER UPDATE true -1 hive_test_user
-default src_cbo hive_test_user USER DELETE true -1 hive_test_user
-default src_cbo hive_test_user USER INSERT true -1 hive_test_user
-default src_cbo hive_test_user USER SELECT true -1 hive_test_user
-default src_cbo hive_test_user USER UPDATE true -1 hive_test_user
-default src_json hive_test_user USER DELETE true -1 hive_test_user
-default src_json hive_test_user USER INSERT true -1 hive_test_user
-default src_json hive_test_user USER SELECT true -1 hive_test_user
-default src_json hive_test_user USER UPDATE true -1 hive_test_user
-default src_sequencefile hive_test_user USER DELETE true -1 hive_test_user
-default src_sequencefile hive_test_user USER INSERT true -1 hive_test_user
-default src_sequencefile hive_test_user USER SELECT true -1 hive_test_user
-default src_sequencefile hive_test_user USER UPDATE true -1 hive_test_user
default src_skew hive_test_user USER DELETE true -1 hive_test_user
default src_skew hive_test_user USER INSERT true -1 hive_test_user
default src_skew hive_test_user USER SELECT true -1 hive_test_user
default src_skew hive_test_user USER UPDATE true -1 hive_test_user
-default src_thrift hive_test_user USER DELETE true -1 hive_test_user
-default src_thrift hive_test_user USER INSERT true -1 hive_test_user
-default src_thrift hive_test_user USER SELECT true -1 hive_test_user
-default src_thrift hive_test_user USER UPDATE true -1 hive_test_user
default srcbucket hive_test_user USER DELETE true -1 hive_test_user
default srcbucket hive_test_user USER INSERT true -1 hive_test_user
default srcbucket hive_test_user USER SELECT true -1 hive_test_user
default srcbucket hive_test_user USER UPDATE true -1 hive_test_user
-default srcbucket2 hive_test_user USER DELETE true -1 hive_test_user
-default srcbucket2 hive_test_user USER INSERT true -1 hive_test_user
-default srcbucket2 hive_test_user USER SELECT true -1 hive_test_user
-default srcbucket2 hive_test_user USER UPDATE true -1 hive_test_user
-default srcpart hive_test_user USER DELETE true -1 hive_test_user
-default srcpart hive_test_user USER INSERT true -1 hive_test_user
-default srcpart hive_test_user USER SELECT true -1 hive_test_user
-default srcpart hive_test_user USER UPDATE true -1 hive_test_user
information_schema column_privileges hive_test_user USER DELETE true -1 hive_test_user
information_schema column_privileges hive_test_user USER INSERT true -1 hive_test_user
information_schema column_privileges hive_test_user USER SELECT true -1 hive_test_user
@@ -428,7 +376,6 @@ POSTHOOK: Input: sys@bucketing_cols
#### A masked pattern was here ####
key 0
key 0
-key 0
value 0
PREHOOK: query: select t.tbl_name, c.column_name from tbls t join sds s on t.sd_id=s.sd_id join columns_v2 c on s.cd_id=c.cd_id order by t.tbl_name, c.column_name
PREHOOK: type: QUERY
@@ -442,48 +389,9 @@ POSTHOOK: Input: sys@columns_v2
POSTHOOK: Input: sys@sds
POSTHOOK: Input: sys@tbls
#### A masked pattern was here ####
-alltypesorc cbigint
-alltypesorc cboolean1
-alltypesorc cboolean2
-alltypesorc cdouble
-alltypesorc cfloat
-alltypesorc cint
-alltypesorc csmallint
-alltypesorc cstring1
-alltypesorc cstring2
-alltypesorc ctimestamp1
-alltypesorc ctimestamp2
-alltypesorc ctinyint
-alltypesparquet cbigint
-alltypesparquet cboolean1
-alltypesparquet cboolean2
-alltypesparquet cdouble
-alltypesparquet cfloat
-alltypesparquet cint
-alltypesparquet csmallint
-alltypesparquet cstring1
-alltypesparquet cstring2
-alltypesparquet ctimestamp1
-alltypesparquet ctimestamp2
-alltypesparquet ctinyint
bucketing_cols bucket_col_name
bucketing_cols integer_idx
bucketing_cols sd_id
-cbo_t1 c_boolean
-cbo_t1 c_float
-cbo_t1 c_int
-cbo_t1 key
-cbo_t1 value
-cbo_t2 c_boolean
-cbo_t2 c_float
-cbo_t2 c_int
-cbo_t2 key
-cbo_t2 value
-cbo_t3 c_boolean
-cbo_t3 c_float
-cbo_t3 c_int
-cbo_t3 key
-cbo_t3 value
cds cd_id
column_privileges column_name
column_privileges grantee
@@ -664,22 +572,6 @@ key_constraints parent_integer_idx
key_constraints parent_tbl_id
key_constraints position
key_constraints update_rule
-lineitem l_comment
-lineitem l_commitdate
-lineitem l_discount
-lineitem l_extendedprice
-lineitem l_linenumber
-lineitem l_linestatus
-lineitem l_orderkey
-lineitem l_partkey
-lineitem l_quantity
-lineitem l_receiptdate
-lineitem l_returnflag
-lineitem l_shipdate
-lineitem l_shipinstruct
-lineitem l_shipmode
-lineitem l_suppkey
-lineitem l_tax
locks acquired_at
locks agent_info
locks blockedby_ext_id
@@ -881,32 +773,11 @@ sort_cols order
sort_cols sd_id
src key
src value
-src1 key
-src1 value
src_buck key
src_buck value
-src_cbo key
-src_cbo value
-src_json json
-src_sequencefile key
-src_sequencefile value
src_skew key
-src_thrift aint
-src_thrift astring
-src_thrift attributes
-src_thrift lint
-src_thrift lintstring
-src_thrift lstring
-src_thrift mstringstring
-src_thrift unionfield1
-src_thrift unionfield2
-src_thrift unionfield3
srcbucket key
srcbucket value
-srcbucket2 key
-srcbucket2 value
-srcpart key
-srcpart value
tab_col_stats avg_col_len
tab_col_stats big_decimal_high_value
tab_col_stats big_decimal_low_value
@@ -1112,11 +983,6 @@ POSTHOOK: query: select part_name from partitions order by part_name limit 5
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@partitions
#### A masked pattern was here ####
-ds=2008-04-08/hr=11
-ds=2008-04-08/hr=12
-ds=2008-04-09/hr=11
-ds=2008-04-09/hr=12
-dt=2014
PREHOOK: query: select pkey_name, pkey_type from partition_keys order by pkey_name limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@partition_keys
@@ -1125,10 +991,6 @@ POSTHOOK: query: select pkey_name, pkey_type from partition_keys order by pkey_n
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@partition_keys
#### A masked pattern was here ####
-ds string
-dt string
-dt string
-hr string
PREHOOK: query: select part_key_val, integer_idx from partition_key_vals order by part_key_val, integer_idx limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@partition_key_vals
@@ -1137,11 +999,6 @@ POSTHOOK: query: select part_key_val, integer_idx from partition_key_vals order
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@partition_key_vals
#### A masked pattern was here ####
-11 1
-11 1
-12 1
-12 1
-2008-04-08 0
PREHOOK: query: select param_key, param_value from partition_params order by param_key, param_value limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@partition_params
@@ -1150,11 +1007,6 @@ POSTHOOK: query: select param_key, param_value from partition_params order by pa
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@partition_params
#### A masked pattern was here ####
-COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"c_boolean":"true","c_float":"true","c_int":"true","key":"true","value":"true"}}
-COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"c_boolean":"true","c_float":"true","c_int":"true","key":"true","value":"true"}}
-COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
PREHOOK: query: select grantor, principal_name from part_col_privs order by grantor, principal_name limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@part_col_privs
@@ -1323,7 +1175,7 @@ POSTHOOK: query: select count(*) from sds
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@sds
#### A masked pattern was here ####
-91
+72
PREHOOK: query: select param_key, param_value from sd_params order by param_key, param_value limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@sd_params
@@ -1342,8 +1194,8 @@ POSTHOOK: Input: sys@serdes
#### A masked pattern was here ####
NULL org.apache.hadoop.hive.ql.io.orc.OrcSerde
NULL org.apache.hadoop.hive.ql.io.orc.OrcSerde
-NULL org.apache.hadoop.hive.ql.io.orc.OrcSerde
-NULL org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe
+NULL org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+NULL org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
NULL org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
PREHOOK: query: select param_key, param_value from serde_params order by param_key, param_value limit 5
PREHOOK: type: QUERY
@@ -1353,11 +1205,11 @@ POSTHOOK: query: select param_key, param_value from serde_params order by param_
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@serde_params
#### A masked pattern was here ####
-field.delim ,
-field.delim ,
-field.delim ,
-field.delim ,
-field.delim ,
+serialization.format 1
+serialization.format 1
+serialization.format 1
+serialization.format 1
+serialization.format 1
PREHOOK: query: select skewed_col_name from skewed_col_names order by skewed_col_name limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@skewed_col_names
@@ -1432,11 +1284,11 @@ POSTHOOK: query: select tbl_name from tbls order by tbl_name limit 5
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@tbls
#### A masked pattern was here ####
-alltypesorc
-alltypesparquet
bucketing_cols
-cbo_t1
-cbo_t2
+cds
+column_privileges
+columns
+columns_v2
PREHOOK: query: select column_name, grantor, principal_name from tbl_col_privs order by column_name, principal_name limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@tbl_col_privs
@@ -1466,16 +1318,16 @@ POSTHOOK: query: select table_name, column_name, num_nulls, num_distincts from t
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@tab_col_stats
#### A masked pattern was here ####
-alltypesorc cbigint 3115 5917
-alltypesorc cboolean1 3114 NULL
-alltypesorc cboolean2 3115 NULL
-alltypesorc cdouble 3114 5527
-alltypesorc cfloat 3115 131
-alltypesorc cint 3115 6104
-alltypesorc csmallint 3114 5666
-alltypesorc cstring1 3114 5979
-alltypesorc cstring2 3115 6122
-alltypesorc ctimestamp1 3115 35
+part p_brand 0 16
+part p_comment 0 25
+part p_container 0 18
+part p_mfgr 0 5
+part p_name 0 25
+part p_partkey 0 25
+part p_retailprice 0 25
+part p_size 0 21
+part p_type 0 24
+src key 0 316
PREHOOK: query: select table_name, partition_name, column_name, num_nulls, num_distincts from part_col_stats order by table_name, partition_name, column_name limit 10
PREHOOK: type: QUERY
PREHOOK: Input: sys@part_col_stats
@@ -1484,16 +1336,6 @@ POSTHOOK: query: select table_name, partition_name, column_name, num_nulls, num_
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@part_col_stats
#### A masked pattern was here ####
-cbo_t1 dt=2014 c_boolean 2 NULL
-cbo_t1 dt=2014 c_float 2 1
-cbo_t1 dt=2014 c_int 2 1
-cbo_t1 dt=2014 key 2 4
-cbo_t1 dt=2014 value 2 4
-cbo_t2 dt=2014 c_boolean 2 NULL
-cbo_t2 dt=2014 c_float 2 2
-cbo_t2 dt=2014 c_int 2 2
-cbo_t2 dt=2014 key 2 5
-cbo_t2 dt=2014 value 2 5
PREHOOK: query: select schema_version from version order by schema_version limit 5
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
@@ -1551,11 +1393,6 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@partition_params
POSTHOOK: Input: sys@partition_stats_view
#### A masked pattern was here ####
-{"BASIC_STATS":"true","COLUMN_STATS":{"c_boolean":"true","c_float":"true","c_int":"true","key":"true","value":"true"}} 1 20 262 282
-{"BASIC_STATS":"true","COLUMN_STATS":{"c_boolean":"true","c_float":"true","c_int":"true","key":"true","value":"true"}} 1 20 262 282
-{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} 1 500 5312 5812
-{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} 1 500 5312 5812
-{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} 1 500 5312 5812
PREHOOK: query: describe sys.tab_col_stats
PREHOOK: type: DESCTABLE
PREHOOK: Input: sys@tab_col_stats
@@ -1654,7 +1491,7 @@ POSTHOOK: query: select max(num_distincts) from sys.tab_col_stats
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@tab_col_stats
#### A masked pattern was here ####
-6122
+431
PREHOOK: query: select * from compactions
PREHOOK: type: QUERY
PREHOOK: Input: sys@compaction_queue
@@ -1704,27 +1541,14 @@ POSTHOOK: Input: sys@dbs
POSTHOOK: Input: sys@tbl_privs
POSTHOOK: Input: sys@tbls
#### A masked pattern was here ####
-default default alltypesorc BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default alltypesparquet BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default cbo_t1 BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default cbo_t2 BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default cbo_t3 BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default lineitem BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default default moretypes BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default default part BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default default scr_txn BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default default scr_txn_2 BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default default src BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default src1 BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default default src_buck BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default src_cbo BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default src_json BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default src_sequencefile BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default default src_skew BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default src_thrift BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default default srcbucket BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default srcbucket2 BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
-default default srcpart BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default information_schema column_privileges VIEW NULL NULL NULL NULL NULL NO NO NULL
default information_schema columns VIEW NULL NULL NULL NULL NULL NO NO NULL
default information_schema compactions VIEW NULL NULL NULL NULL NULL NO NO NULL
@@ -1803,16 +1627,16 @@ POSTHOOK: Input: sys@dbs
POSTHOOK: Input: sys@tbl_privs
POSTHOOK: Input: sys@tbls
#### A masked pattern was here ####
-hive_test_user hive_test_user default default alltypesorc DELETE YES NO
-hive_test_user hive_test_user default default alltypesorc INSERT YES NO
-hive_test_user hive_test_user default default alltypesorc SELECT YES NO
-hive_test_user hive_test_user default default alltypesorc UPDATE YES NO
-hive_test_user hive_test_user default default alltypesparquet DELETE YES NO
-hive_test_user hive_test_user default default alltypesparquet INSERT YES NO
-hive_test_user hive_test_user default default alltypesparquet SELECT YES NO
-hive_test_user hive_test_user default default alltypesparquet UPDATE YES NO
-hive_test_user hive_test_user default default cbo_t1 DELETE YES NO
-hive_test_user hive_test_user default default cbo_t1 INSERT YES NO
+hive_test_user hive_test_user default default moretypes DELETE YES NO
+hive_test_user hive_test_user default default moretypes INSERT YES NO
+hive_test_user hive_test_user default default moretypes SELECT YES NO
+hive_test_user hive_test_user default default moretypes UPDATE YES NO
+hive_test_user hive_test_user default default part DELETE YES NO
+hive_test_user hive_test_user default default part INSERT YES NO
+hive_test_user hive_test_user default default part SELECT YES NO
+hive_test_user hive_test_user default default part UPDATE YES NO
+hive_test_user hive_test_user default default scr_txn DELETE YES NO
+hive_test_user hive_test_user default default scr_txn INSERT YES NO
PREHOOK: query: select table_catalog,table_schema,table_name,column_name,ordinal_position,column_default,is_nullable,data_type,character_maximum_length,character_octet_length,numeric_precision,numeric_precision_radix,numeric_scale,datetime_precision,interval_type,interval_precision,character_set_catalog,character_set_schema,character_set_name,collation_catalog,collation_schema,collation_name,udt_catalog,udt_schema,udt_name,scope_catalog,scope_schema,scope_name,maximum_cardinality,is_self [...]
PREHOOK: type: QUERY
PREHOOK: Input: information_schema@columns
@@ -1831,18 +1655,6 @@ POSTHOOK: Input: sys@sds
POSTHOOK: Input: sys@tbl_col_privs
POSTHOOK: Input: sys@tbls
#### A masked pattern was here ####
-default default alltypesorc cbigint 3 NULL YES bigint NULL NULL 19 10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES bigint 19 10
-default default alltypesorc cboolean1 10 NULL YES boolean NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES boolean NULL NULL
-default default alltypesorc cboolean2 11 NULL YES boolean NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES boolean NULL NULL
-default default alltypesorc cdouble 5 NULL YES double NULL NULL 53 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES double 53 2
-default default alltypesorc cfloat 4 NULL YES float NULL NULL 23 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES float 23 2
-default default alltypesorc cint 2 NULL YES int NULL NULL 10 10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES int 10 10
-default default alltypesorc csmallint 1 NULL YES smallint NULL NULL 5 10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES smallint 5 10
-default default alltypesorc cstring1 6 NULL YES string NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES string NULL NULL
-default default alltypesorc cstring2 7 NULL YES string NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES string NULL NULL
-default default alltypesorc ctimestamp1 8 NULL YES timestamp NULL NULL NULL NULL NULL 9 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES timestamp NULL NULL
-default default alltypesorc ctimestamp2 9 NULL YES timestamp NULL NULL NULL NULL NULL 9 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES timestamp NULL NULL
-default default alltypesorc ctinyint 0 NULL YES tinyint NULL NULL 3 10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES tinyint 3 10
default default moretypes a 0 NULL YES decimal(10,2) NULL NULL 10 10 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES decimal(10,2) 10 10
default default moretypes b 1 NULL YES tinyint NULL NULL 3 10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES tinyint 3 10
default default moretypes c 2 NULL YES smallint NULL NULL 5 10 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NO NO NULL NULL NULL NULL NULL NULL NEVER NULL NO NO NULL YES smallint 5 10
[hive] 01/04: HIVE-23246: Reduce MiniDruidCluster memory
requirements (Zoltan Haindrich reviewed by Peter Vary)
Posted by kg...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
commit 998be10e21aa1de58b9d0f48940bc216ba66dbde
Author: Zoltan Haindrich <ki...@rxd.hu>
AuthorDate: Wed Apr 22 08:06:48 2020 +0000
HIVE-23246: Reduce MiniDruidCluster memory requirements (Zoltan Haindrich reviewed by Peter Vary)
Signed-off-by: Zoltan Haindrich <zh...@cloudera.com>
---
.../java/org/apache/hive/druid/MiniDruidCluster.java | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/itests/qtest-druid/src/main/java/org/apache/hive/druid/MiniDruidCluster.java b/itests/qtest-druid/src/main/java/org/apache/hive/druid/MiniDruidCluster.java
index 8081595..0fb63ce 100644
--- a/itests/qtest-druid/src/main/java/org/apache/hive/druid/MiniDruidCluster.java
+++ b/itests/qtest-druid/src/main/java/org/apache/hive/druid/MiniDruidCluster.java
@@ -60,7 +60,7 @@ public class MiniDruidCluster extends AbstractService {
"druid.storage.type",
"hdfs",
"druid.processing.buffer.sizeBytes",
- "213870912",
+ "10485760",
"druid.processing.numThreads",
"2",
"druid.worker.capacity",
@@ -72,16 +72,14 @@ public class MiniDruidCluster extends AbstractService {
private static final Map<String, String>
COMMON_COORDINATOR_INDEXER =
- ImmutableMap.of("druid.indexer.logs.type",
- "file",
- "druid.coordinator.asOverlord.enabled",
- "true",
- "druid.coordinator.asOverlord.overlordService",
- "druid/overlord",
- "druid.coordinator.period",
- "PT2S",
- "druid.manager.segments.pollDuration",
- "PT2S");
+ ImmutableMap.<String,String>builder()
+ .put("druid.indexer.logs.type", "file")
+ .put("druid.coordinator.asOverlord.enabled", "true")
+ .put("druid.coordinator.asOverlord.overlordService", "druid/overlord")
+ .put("druid.coordinator.period", "PT2S")
+ .put("druid.manager.segments.pollDuration", "PT2S")
+ .put("druid.indexer.runner.javaOpts", "-Xmx512m")
+ .build();
private static final int MIN_PORT_NUMBER = 60000;
private static final int MAX_PORT_NUMBER = 65535;