You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by mg...@apache.org on 2020/06/17 12:57:59 UTC
[hive] branch master updated: HIVE-23418 : Add test.local.warehouse.dir for TestMiniLlapLocalDriver tests (Miklos Gergely, reviewed by Zoltan Haindrich)
This is an automated email from the ASF dual-hosted git repository.
mgergely pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new ed639c4 HIVE-23418 : Add test.local.warehouse.dir for TestMiniLlapLocalDriver tests (Miklos Gergely, reviewed by Zoltan Haindrich)
ed639c4 is described below
commit ed639c4cc9a00324711ce1659e355bb36876115a
Author: Miklos Gergely <mg...@cloudera.com>
AuthorDate: Wed Jun 17 14:57:48 2020 +0200
HIVE-23418 : Add test.local.warehouse.dir for TestMiniLlapLocalDriver tests (Miklos Gergely, reviewed by Zoltan Haindrich)
---
.../test/resources/testconfiguration.properties | 16 -
pom.xml | 2 +
ql/src/test/queries/clientpositive/input44.q | 2 +-
ql/src/test/queries/clientpositive/msck_repair_0.q | 8 +-
ql/src/test/queries/clientpositive/msck_repair_1.q | 4 +-
ql/src/test/queries/clientpositive/msck_repair_2.q | 6 +-
ql/src/test/queries/clientpositive/msck_repair_3.q | 2 +-
ql/src/test/queries/clientpositive/msck_repair_4.q | 4 +-
ql/src/test/queries/clientpositive/msck_repair_5.q | 6 +-
.../test/queries/clientpositive/msck_repair_acid.q | 10 +-
.../queries/clientpositive/msck_repair_batchsize.q | 12 +-
.../test/queries/clientpositive/msck_repair_drop.q | 164 ++++-----
ql/src/test/queries/clientpositive/nullformat.q | 2 +-
.../test/queries/clientpositive/nullformatCTAS.q | 2 +-
.../queries/clientpositive/partition_discovery.q | 8 +-
ql/src/test/queries/clientpositive/repair.q | 6 +-
.../clientpositive/symlink_text_input_format.q | 8 +-
.../clientpositive/{ => llap}/input44.q.out | 0
.../clientpositive/{ => llap}/msck_repair_0.q.out | 0
.../clientpositive/{ => llap}/msck_repair_1.q.out | 0
.../clientpositive/{ => llap}/msck_repair_2.q.out | 0
.../clientpositive/{ => llap}/msck_repair_3.q.out | 0
.../clientpositive/{ => llap}/msck_repair_4.q.out | 0
.../clientpositive/{ => llap}/msck_repair_5.q.out | 0
.../clientpositive/{ => llap}/msck_repair_6.q.out | 0
.../{ => llap}/msck_repair_acid.q.out | 0
.../{ => llap}/msck_repair_batchsize.q.out | 0
.../{ => llap}/msck_repair_drop.q.out | 0
.../clientpositive/{ => llap}/nullformat.q.out | 0
.../clientpositive/{ => llap}/nullformatCTAS.q.out | 160 ++++-----
.../{ => llap}/partition_discovery.q.out | 0
.../results/clientpositive/{ => llap}/repair.q.out | 0
.../{ => llap}/symlink_text_input_format.q.out | 374 ++++++++++++---------
33 files changed, 403 insertions(+), 393 deletions(-)
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index f430a13..810de56 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -205,7 +205,6 @@ mr.query.files=\
infer_bucket_sort.q,\
input37.q,\
input39.q,\
- input44.q,\
inputwherefalse.q,\
join_map_ppr.q,\
join_vc.q,\
@@ -222,24 +221,10 @@ mr.query.files=\
mapjoin_subquery2.q,\
mapjoin_test_outer.q,\
masking_5.q,\
- msck_repair_0.q,\
- msck_repair_1.q,\
- msck_repair_2.q,\
- msck_repair_3.q,\
- msck_repair_4.q,\
- msck_repair_5.q,\
- msck_repair_6.q,\
- msck_repair_acid.q,\
- msck_repair_batchsize.q,\
- msck_repair_drop.q,\
nonmr_fetch.q,\
nonreserved_keywords_input37.q,\
- nullformat.q,\
- nullformatCTAS.q,\
parenthesis_star_by.q,\
- partition_discovery.q,\
partition_vs_table_metadata.q,\
- repair.q,\
row__id.q,\
sample_islocalmode_hook.q,\
sample_islocalmode_hook_use_metadata.q,\
@@ -280,7 +265,6 @@ mr.query.files=\
sort_merge_join_desc_7.q,\
sort_merge_join_desc_8.q,\
stats_noscan_2.q,\
- symlink_text_input_format.q,\
timestamptz_2.q,\
transform_acid.q,\
type_change_test_fraction_vectorized.q,\
diff --git a/pom.xml b/pom.xml
index 44fff7d..bb93b52 100644
--- a/pom.xml
+++ b/pom.xml
@@ -91,6 +91,7 @@
<!-- Determines the log level of the console logger, hive.log is independent of this-->
<test.console.log.level>INFO</test.console.log.level>
<test.warehouse.dir>${project.build.directory}/warehouse</test.warehouse.dir>
+ <test.local.warehouse.dir>${project.build.directory}/localfs/warehouse</test.local.warehouse.dir>
<test.warehouse.scheme>pfile://</test.warehouse.scheme>
<!-- To add additional exclude patterns set this property -->
@@ -1412,6 +1413,7 @@
<test.dfs.mkdir>${test.dfs.mkdir}</test.dfs.mkdir>
<test.output.overwrite>${test.output.overwrite}</test.output.overwrite>
<test.warehouse.dir>${test.warehouse.scheme}${test.warehouse.dir}</test.warehouse.dir>
+ <test.local.warehouse.dir>${test.warehouse.scheme}${test.local.warehouse.dir}</test.local.warehouse.dir>
<java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
<!-- EnforceReadOnlyTables hook and QTestUtil -->
<test.src.tables></test.src.tables>
diff --git a/ql/src/test/queries/clientpositive/input44.q b/ql/src/test/queries/clientpositive/input44.q
index c4ed032..21a3af9 100644
--- a/ql/src/test/queries/clientpositive/input44.q
+++ b/ql/src/test/queries/clientpositive/input44.q
@@ -4,4 +4,4 @@ CREATE TABLE dest_n0(key INT, value STRING) STORED AS TEXTFILE;
SET hive.output.file.extension=.txt;
INSERT OVERWRITE TABLE dest_n0 SELECT src.* FROM src;
-dfs -cat ${system:test.warehouse.dir}/dest_n0/*.txt
+dfs -cat ${system:test.local.warehouse.dir}/dest_n0/*.txt
diff --git a/ql/src/test/queries/clientpositive/msck_repair_0.q b/ql/src/test/queries/clientpositive/msck_repair_0.q
index 17168e2..3346971 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_0.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_0.q
@@ -9,8 +9,8 @@ MSCK TABLE repairtable_n5;
show partitions repairtable_n5;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n5/p1=c/p2=a/p3=b;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n5/p1=c/p2=a/p3=b/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n5/p1=c/p2=a/p3=b;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n5/p1=c/p2=a/p3=b/datafile;
MSCK TABLE default.repairtable_n5;
@@ -26,8 +26,8 @@ show partitions repairtable_n5;
set hive.mapred.mode=strict;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n5/p1=e/p2=f/p3=g;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n5/p1=e/p2=f/p3=g/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n5/p1=e/p2=f/p3=g;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n5/p1=e/p2=f/p3=g/datafile;
MSCK REPAIR TABLE default.repairtable_n5;
diff --git a/ql/src/test/queries/clientpositive/msck_repair_1.q b/ql/src/test/queries/clientpositive/msck_repair_1.q
index 21aca3b..d0b86c0 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_1.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_1.q
@@ -8,8 +8,8 @@ MSCK TABLE repairtable;
SHOW PARTITIONS repairtable;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=c/p2=a/p3=b;
-dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=c/p2=a/p3=b/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable/p1=c/p2=a/p3=b;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable/p1=c/p2=a/p3=b/datafile;
MSCK TABLE default.repairtable;
diff --git a/ql/src/test/queries/clientpositive/msck_repair_2.q b/ql/src/test/queries/clientpositive/msck_repair_2.q
index 2a0987f..43c07f9 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_2.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_2.q
@@ -9,9 +9,9 @@ MSCK TABLE repairtable_n2;
show partitions repairtable_n2;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n2/p1=c/p2=a/p3=b;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n2/p1=c/p2=a/p3=b/datafile;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n2/p1=c/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n2/p1=c/p2=a/p3=b;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n2/p1=c/p2=a/p3=b/datafile;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n2/p1=c/datafile;
MSCK TABLE default.repairtable_n2;
show partitions repairtable_n2;
diff --git a/ql/src/test/queries/clientpositive/msck_repair_3.q b/ql/src/test/queries/clientpositive/msck_repair_3.q
index 2e01f69..0f73184 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_3.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_3.q
@@ -7,7 +7,7 @@ CREATE TABLE repairtable_n3(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
MSCK TABLE repairtable_n3;
show partitions repairtable_n3;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n3/p1=c/p2=a/p3=b;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n3/p1=c/p2=a/p3=b;
MSCK TABLE default.repairtable_n3;
show partitions repairtable_n3;
diff --git a/ql/src/test/queries/clientpositive/msck_repair_4.q b/ql/src/test/queries/clientpositive/msck_repair_4.q
index 68ddaea..b3cd37b 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_4.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_4.q
@@ -5,8 +5,8 @@ CREATE EXTERNAL TABLE repairtable_n4(key INT, value STRING) PARTITIONED BY (Year
MSCK REPAIR TABLE repairtable_n4;
show partitions repairtable_n4;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n4/Year=2020/Month=3/Day=1;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n4/Year=2020/Month=3/Day=2;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n4/Year=2020/Month=3/Day=1;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n4/Year=2020/Month=3/Day=2;
MSCK REPAIR TABLE repairtable_n4;
show partitions repairtable_n4;
diff --git a/ql/src/test/queries/clientpositive/msck_repair_5.q b/ql/src/test/queries/clientpositive/msck_repair_5.q
index 677063b..229de20 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_5.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_5.q
@@ -5,9 +5,9 @@ CREATE EXTERNAL TABLE repairtable_n5(key INT, value STRING) PARTITIONED BY (Coun
MSCK REPAIR TABLE repairtable_n5;
show partitions repairtable_n5;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n5/Country=US;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n5/Country=us;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n5/Country=India;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n5/Country=US;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n5/Country=us;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n5/Country=India;
MSCK REPAIR TABLE repairtable_n5;
show partitions repairtable_n5;
diff --git a/ql/src/test/queries/clientpositive/msck_repair_acid.q b/ql/src/test/queries/clientpositive/msck_repair_acid.q
index 369095d..a9a5317 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_acid.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_acid.q
@@ -12,10 +12,10 @@ MSCK TABLE repairtable_n6;
show partitions repairtable_n6;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n6/p1=a/p2=b/;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n6/p1=c/p2=d/;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n6/p1=a/p2=b/datafile;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n6/p1=c/p2=d/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n6/p1=a/p2=b/;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n6/p1=c/p2=d/;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n6/p1=a/p2=b/datafile;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n6/p1=c/p2=d/datafile;
EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6;
MSCK REPAIR TABLE default.repairtable_n6;
@@ -24,7 +24,7 @@ show partitions default.repairtable_n6;
set hive.mapred.mode=strict;
-dfs -rmr ${system:test.warehouse.dir}/repairtable_n6/p1=c;
+dfs -rmr ${system:test.local.warehouse.dir}/repairtable_n6/p1=c;
EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS;
MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS;
diff --git a/ql/src/test/queries/clientpositive/msck_repair_batchsize.q b/ql/src/test/queries/clientpositive/msck_repair_batchsize.q
index 10ffc8a..6d35b04 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_batchsize.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_batchsize.q
@@ -6,12 +6,12 @@ CREATE TABLE repairtable_n0(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
MSCK TABLE repairtable_n0;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n0/p1=a/p2=a;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n0/p1=b/p2=a;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n0/p1=c/p2=a;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n0/p1=a/p2=a/datafile;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n0/p1=b/p2=a/datafile;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n0/p1=c/p2=a/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n0/p1=a/p2=a;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n0/p1=b/p2=a;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n0/p1=c/p2=a;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n0/p1=a/p2=a/datafile;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n0/p1=b/p2=a/datafile;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n0/p1=c/p2=a/datafile;
MSCK TABLE default.repairtable_n0;
show partitions default.repairtable_n0;
diff --git a/ql/src/test/queries/clientpositive/msck_repair_drop.q b/ql/src/test/queries/clientpositive/msck_repair_drop.q
index 3fe80ef..54fc91a 100644
--- a/ql/src/test/queries/clientpositive/msck_repair_drop.q
+++ b/ql/src/test/queries/clientpositive/msck_repair_drop.q
@@ -11,31 +11,31 @@ CREATE TABLE repairtable_n1(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
-- the same set of 10 partitions will be created between each drop attempts
-- p1=3, p1=4 and p1=5 will be used to test keywords add, drop and sync
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=1/p2=11/p3=111;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=1/p2=11/p3=111/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=1/p2=12/p3=121;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=1/p2=12/p3=121/datafile;
-
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=1/p2=11/p3=111;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=1/p2=11/p3=111/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=1/p2=12/p3=121;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=1/p2=12/p3=121/datafile;
+
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101/datafile;
EXPLAIN MSCK TABLE default.repairtable_n1;
MSCK TABLE default.repairtable_n1;
@@ -46,7 +46,7 @@ MSCK REPAIR TABLE default.repairtable_n1;
show partitions default.repairtable_n1;
-- Remove all p1=2 partitions from file system
-dfs -rmr ${system:test.warehouse.dir}/repairtable_n1/p1=2;
+dfs -rmr ${system:test.local.warehouse.dir}/repairtable_n1/p1=2;
-- test 1: each partition is dropped individually
set hive.msck.repair.batch.size=1;
@@ -57,26 +57,26 @@ MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS;
show partitions default.repairtable_n1;
-- Recreate p1=2 partitions
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101/datafile;
MSCK TABLE default.repairtable_n1;
MSCK REPAIR TABLE default.repairtable_n1;
@@ -85,7 +85,7 @@ MSCK REPAIR TABLE default.repairtable_n1;
show partitions default.repairtable_n1;
-- Remove all p1=2 partitions from file system
-dfs -rmr ${system:test.warehouse.dir}/repairtable_n1/p1=2;
+dfs -rmr ${system:test.local.warehouse.dir}/repairtable_n1/p1=2;
-- test 2: partition are dropped in groups of 3
set hive.msck.repair.batch.size=3;
@@ -94,26 +94,26 @@ MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS;
show partitions default.repairtable_n1;
-- Recreate p1=2 partitions
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=21/p3=211/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=22/p3=221/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=23/p3=231/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=24/p3=241/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=25/p3=251/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=26/p3=261/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=27/p3=271/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=28/p3=281/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=29/p3=291/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=2/p2=210/p3=2101/datafile;
MSCK TABLE default.repairtable_n1;
MSCK REPAIR TABLE default.repairtable_n1;
@@ -122,7 +122,7 @@ MSCK REPAIR TABLE default.repairtable_n1;
show partitions default.repairtable_n1;
-- Remove all p1=2 partitions from file system
-dfs -rmr ${system:test.warehouse.dir}/repairtable_n1/p1=2;
+dfs -rmr ${system:test.local.warehouse.dir}/repairtable_n1/p1=2;
-- test 3. all partitions are dropped in 1 shot
set hive.msck.repair.batch.size=0;
@@ -131,23 +131,23 @@ MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS;
show partitions default.repairtable_n1;
-- test add parition keyword: begin
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=3/p2=31/p3=311;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=3/p2=31/p3=311/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=3/p2=32/p3=321;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=3/p2=32/p3=321/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=3/p2=31/p3=311;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=3/p2=31/p3=311/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=3/p2=32/p3=321;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=3/p2=32/p3=321/datafile;
MSCK TABLE default.repairtable_n1;
MSCK REPAIR TABLE default.repairtable_n1;
show partitions default.repairtable_n1;
-- Create p1=4 in filesystem
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=4/p2=41/p3=411;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=4/p2=41/p3=411/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=4/p2=42/p3=421;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=4/p2=42/p3=421/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=4/p2=41/p3=411;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=4/p2=41/p3=411/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=4/p2=42/p3=421;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=4/p2=42/p3=421/datafile;
-- Remove p1=3 from filesystem
-dfs -rmr ${system:test.warehouse.dir}/repairtable_n1/p1=3;
+dfs -rmr ${system:test.local.warehouse.dir}/repairtable_n1/p1=3;
-- Status: p1=3 dropped from filesystem, but exists in metastore
-- p1=4 exists in filesystem but not in metastore
@@ -160,10 +160,10 @@ show partitions default.repairtable_n1;
-- test add partition keyword: end
-- test drop partition keyword: begin
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=5/p2=51/p3=511;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=5/p2=51/p3=511/datafile;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n1/p1=5/p2=52/p3=521;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n1/p1=5/p2=52/p3=521/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=5/p2=51/p3=511;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=5/p2=51/p3=511/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n1/p1=5/p2=52/p3=521;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n1/p1=5/p2=52/p3=521/datafile;
-- Status: p1=3 removed from filesystem, but exists in metastore (as part of add test)
-- p1=5 exists in filesystem but not in metastore
@@ -175,7 +175,7 @@ show partitions default.repairtable_n1;
-- test sync partition keyword: begin
-- Remove p1=4 from filesystem
-dfs -rmr ${system:test.warehouse.dir}/repairtable_n1/p1=4;
+dfs -rmr ${system:test.local.warehouse.dir}/repairtable_n1/p1=4;
-- Status: p1=4 dropped from filesystem, but exists in metastore
-- p1=5 exists in filesystem but not in metastore (as part of drop test)
diff --git a/ql/src/test/queries/clientpositive/nullformat.q b/ql/src/test/queries/clientpositive/nullformat.q
index c9a7dab..80adbfc 100644
--- a/ql/src/test/queries/clientpositive/nullformat.q
+++ b/ql/src/test/queries/clientpositive/nullformat.q
@@ -13,7 +13,7 @@ SHOW CREATE TABLE null_tab1;
-- load null data from another table and verify that the null is stored in the expected format
INSERT OVERWRITE TABLE null_tab1 SELECT a,b FROM base_tab;
-dfs -cat ${system:test.warehouse.dir}/null_tab1/*;
+dfs -cat ${system:test.local.warehouse.dir}/null_tab1/*;
SELECT * FROM null_tab1;
-- alter the null format and verify that the old null format is no longer in effect
ALTER TABLE null_tab1 SET SERDEPROPERTIES ( 'serialization.null.format'='foo');
diff --git a/ql/src/test/queries/clientpositive/nullformatCTAS.q b/ql/src/test/queries/clientpositive/nullformatCTAS.q
index 093742f..df9488d 100644
--- a/ql/src/test/queries/clientpositive/nullformatCTAS.q
+++ b/ql/src/test/queries/clientpositive/nullformatCTAS.q
@@ -13,7 +13,7 @@ CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull'
DESCRIBE EXTENDED null_tab3;
SHOW CREATE TABLE null_tab3;
-dfs -cat ${system:test.warehouse.dir}/null_tab3/*;
+dfs -cat ${system:test.local.warehouse.dir}/null_tab3/*;
SELECT * FROM null_tab3;
-- alter the null format and verify that the old null format is no longer in effect
ALTER TABLE null_tab3 SET SERDEPROPERTIES ( 'serialization.null.format'='foo');
diff --git a/ql/src/test/queries/clientpositive/partition_discovery.q b/ql/src/test/queries/clientpositive/partition_discovery.q
index c85b6fd..5e22939 100644
--- a/ql/src/test/queries/clientpositive/partition_discovery.q
+++ b/ql/src/test/queries/clientpositive/partition_discovery.q
@@ -60,10 +60,10 @@ show partitions default.repairtable_n9;
CREATE EXTERNAL TABLE repairtable_n10 PARTITIONED BY(p1,p2) STORED AS ORC AS SELECT * FROM repairtable_n9;
describe formatted repairtable_n10;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n10/p1=a/p2=b/;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n10/p1=c/p2=d/;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n10/p1=a/p2=b/datafile;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n10/p1=c/p2=d/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n10/p1=a/p2=b/;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n10/p1=c/p2=d/;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n10/p1=a/p2=b/datafile;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n10/p1=c/p2=d/datafile;
set metastore.msck.repair.enable.partition.retention=false;
!sleep 12;
diff --git a/ql/src/test/queries/clientpositive/repair.q b/ql/src/test/queries/clientpositive/repair.q
index 27ae8d1..0530cb1 100644
--- a/ql/src/test/queries/clientpositive/repair.q
+++ b/ql/src/test/queries/clientpositive/repair.q
@@ -4,9 +4,9 @@ CREATE TABLE repairtable_n4(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
MSCK TABLE repairtable_n4;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n4/p1=a/p2=a;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n4/p1=b/p2=a;
-dfs -touchz ${system:test.warehouse.dir}/repairtable_n4/p1=b/p2=a/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n4/p1=a/p2=a;
+dfs ${system:test.dfs.mkdir} ${system:test.local.warehouse.dir}/repairtable_n4/p1=b/p2=a;
+dfs -touchz ${system:test.local.warehouse.dir}/repairtable_n4/p1=b/p2=a/datafile;
MSCK TABLE default.repairtable_n4;
diff --git a/ql/src/test/queries/clientpositive/symlink_text_input_format.q b/ql/src/test/queries/clientpositive/symlink_text_input_format.q
index d7759c6..3ac3d12 100644
--- a/ql/src/test/queries/clientpositive/symlink_text_input_format.q
+++ b/ql/src/test/queries/clientpositive/symlink_text_input_format.q
@@ -8,8 +8,8 @@ CREATE TABLE symlink_text_input_format (key STRING, value STRING) STORED AS INPU
CREATE TABLE symlink_text_input_format (key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-dfs -cp ../../data/files/symlink1.txt ${system:test.warehouse.dir}/symlink_text_input_format/symlink1.txt;
-dfs -cp ../../data/files/symlink2.txt ${system:test.warehouse.dir}/symlink_text_input_format/symlink2.txt;
+dfs -cp ../../data/files/symlink1.txt ${system:test.local.warehouse.dir}/symlink_text_input_format/symlink1.txt;
+dfs -cp ../../data/files/symlink2.txt ${system:test.local.warehouse.dir}/symlink_text_input_format/symlink2.txt;
EXPLAIN SELECT * FROM symlink_text_input_format order by key, value;
@@ -27,8 +27,8 @@ DROP TABLE symlink_text_input_format;
CREATE TABLE symlink_text_input_format (key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-dfs -cp ../../data/files/symlink-with-regex.txt ${system:test.warehouse.dir}/symlink_text_input_format/symlink-with-regex.txt;
-dfs -cp ../../data/files/symlink2.txt ${system:test.warehouse.dir}/symlink_text_input_format/symlink2.txt;
+dfs -cp ../../data/files/symlink-with-regex.txt ${system:test.local.warehouse.dir}/symlink_text_input_format/symlink-with-regex.txt;
+dfs -cp ../../data/files/symlink2.txt ${system:test.local.warehouse.dir}/symlink_text_input_format/symlink2.txt;
EXPLAIN SELECT * FROM symlink_text_input_format order by key, value;
diff --git a/ql/src/test/results/clientpositive/input44.q.out b/ql/src/test/results/clientpositive/llap/input44.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/input44.q.out
rename to ql/src/test/results/clientpositive/llap/input44.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_0.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_0.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_0.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_0.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_1.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_1.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_1.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_1.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_2.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_2.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_2.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_2.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_3.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_3.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_3.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_3.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_4.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_4.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_4.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_4.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_5.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_5.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_5.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_5.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_6.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_6.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_6.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_6.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_acid.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_acid.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_acid.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_acid.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_batchsize.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_batchsize.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_batchsize.q.out
diff --git a/ql/src/test/results/clientpositive/msck_repair_drop.q.out b/ql/src/test/results/clientpositive/llap/msck_repair_drop.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/msck_repair_drop.q.out
rename to ql/src/test/results/clientpositive/llap/msck_repair_drop.q.out
diff --git a/ql/src/test/results/clientpositive/nullformat.q.out b/ql/src/test/results/clientpositive/llap/nullformat.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/nullformat.q.out
rename to ql/src/test/results/clientpositive/llap/nullformat.q.out
diff --git a/ql/src/test/results/clientpositive/nullformatCTAS.q.out b/ql/src/test/results/clientpositive/llap/nullformatCTAS.q.out
similarity index 58%
rename from ql/src/test/results/clientpositive/nullformatCTAS.q.out
rename to ql/src/test/results/clientpositive/llap/nullformatCTAS.q.out
index 7d3d5eb..568aceb 100644
--- a/ql/src/test/results/clientpositive/nullformatCTAS.q.out
+++ b/ql/src/test/results/clientpositive/llap/nullformatCTAS.q.out
@@ -48,83 +48,77 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@null_tab3
STAGE DEPENDENCIES:
Stage-1 is a root stage
- Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
- Stage-4
- Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
- Stage-8 depends on stages: Stage-0
- Stage-2 depends on stages: Stage-8
- Stage-3
- Stage-5
- Stage-6 depends on stages: Stage-5
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-0, Stage-2
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: base_tab_n2
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: a (type: string), b (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.null_tab3
- Select Operator
- expressions: _col0 (type: string), _col1 (type: string)
- outputColumnNames: col1, col2
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- Group By Operator
- aggregations: max(length(col1)), avg(COALESCE(length(col1),0)), count(1), count(col1), compute_bit_vector(col1, 'hll'), max(length(col2)), avg(COALESCE(length(col2),0)), count(col2), compute_bit_vector(col2, 'hll')
- minReductionHashAggr: 0.99
- mode: hash
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: base_tab_n2
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: a (type: string), b (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.null_tab3
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string)
+ outputColumnNames: col1, col2
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: max(length(col1)), avg(COALESCE(length(col1),0)), count(1), count(col1), compute_bit_vector(col1, 'hll'), max(length(col2)), avg(COALESCE(length(col2),0)), count(col2), compute_bit_vector(col2, 'hll')
+ minReductionHashAggr: 0.99
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 840 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ null sort order:
+ sort order:
+ Statistics: Num rows: 1 Data size: 840 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: int), _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: int), _col6 (type: struct<count:bigint,sum:double,input:int>), _col7 (type: bigint), _col8 (type: binary)
+ Execution mode: llap
+ LLAP IO: no inputs
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 840 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col0,0)) (type: bigint), COALESCE(_col1,0) (type: double), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col5,0)) (type: bigint), COALESCE(_col6,0) (type: double), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
Statistics: Num rows: 1 Data size: 840 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- null sort order:
- sort order:
+ File Output Operator
+ compressed: false
Statistics: Num rows: 1 Data size: 840 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: int), _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: int), _col6 (type: struct<count:bigint,sum:double,input:int>), _col7 (type: bigint), _col8 (type: binary)
- Reduce Operator Tree:
- Group By Operator
- aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
- mode: mergepartial
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
- Statistics: Num rows: 1 Data size: 840 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col0,0)) (type: bigint), COALESCE(_col1,0) (type: double), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col5,0)) (type: bigint), COALESCE(_col6,0) (type: double), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary)
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
- Statistics: Num rows: 1 Data size: 840 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 840 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Stage: Stage-7
- Conditional Operator
+ Stage: Stage-2
+ Dependency Collection
Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-8
Create Table
columns: a string, b string
name: default.null_tab3
@@ -132,7 +126,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Stage: Stage-2
+ Stage: Stage-3
Stats Work
Basic Stats Work:
Column Stats Desc:
@@ -140,31 +134,7 @@ STAGE PLANS:
Column Types: string, string
Table: default.null_tab3
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.null_tab3
-
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.null_tab3
-
- Stage: Stage-6
+ Stage: Stage-0
Move Operator
files:
hdfs directory: true
diff --git a/ql/src/test/results/clientpositive/partition_discovery.q.out b/ql/src/test/results/clientpositive/llap/partition_discovery.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/partition_discovery.q.out
rename to ql/src/test/results/clientpositive/llap/partition_discovery.q.out
diff --git a/ql/src/test/results/clientpositive/repair.q.out b/ql/src/test/results/clientpositive/llap/repair.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/repair.q.out
rename to ql/src/test/results/clientpositive/llap/repair.q.out
diff --git a/ql/src/test/results/clientpositive/symlink_text_input_format.q.out b/ql/src/test/results/clientpositive/llap/symlink_text_input_format.q.out
similarity index 50%
rename from ql/src/test/results/clientpositive/symlink_text_input_format.q.out
rename to ql/src/test/results/clientpositive/llap/symlink_text_input_format.q.out
index 2fea2ca..8f862a1 100644
--- a/ql/src/test/results/clientpositive/symlink_text_input_format.q.out
+++ b/ql/src/test/results/clientpositive/llap/symlink_text_input_format.q.out
@@ -45,33 +45,42 @@ STAGE DEPENDENCIES:
STAGE PLANS:
Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: symlink_text_input_format
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: string)
- null sort order: zz
- sort order: ++
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: symlink_text_input_format
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ null sort order: zz
+ sort order: ++
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ LLAP IO: no inputs
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- Execution mode: vectorized
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
@@ -117,33 +126,42 @@ STAGE DEPENDENCIES:
STAGE PLANS:
Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: symlink_text_input_format
- Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: value (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- null sort order: z
- sort order: +
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: symlink_text_input_format
+ Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ null sort order: z
+ sort order: +
+ Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ LLAP IO: no inputs
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- Execution mode: vectorized
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
@@ -189,38 +207,47 @@ STAGE DEPENDENCIES:
STAGE PLANS:
Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: symlink_text_input_format
- Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
- Select Operator
- Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: symlink_text_input_format
+ Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ minReductionHashAggr: 0.0
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ null sort order:
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized, llap
+ LLAP IO: no inputs
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
Group By Operator
- aggregations: count()
- minReductionHashAggr: 0.99
- mode: hash
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- null sort order:
- sort order:
+ File Output Operator
+ compressed: false
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
@@ -267,33 +294,42 @@ STAGE DEPENDENCIES:
STAGE PLANS:
Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: symlink_text_input_format
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: string)
- null sort order: zz
- sort order: ++
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: symlink_text_input_format
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ null sort order: zz
+ sort order: ++
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ LLAP IO: no inputs
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- Execution mode: vectorized
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
@@ -342,33 +378,42 @@ STAGE DEPENDENCIES:
STAGE PLANS:
Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: symlink_text_input_format
- Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: value (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- null sort order: z
- sort order: +
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: symlink_text_input_format
+ Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ null sort order: z
+ sort order: +
+ Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ LLAP IO: no inputs
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- Execution mode: vectorized
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
@@ -417,38 +462,47 @@ STAGE DEPENDENCIES:
STAGE PLANS:
Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: symlink_text_input_format
- Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE
- Select Operator
- Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: symlink_text_input_format
+ Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ minReductionHashAggr: 0.0
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ null sort order:
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized, llap
+ LLAP IO: no inputs
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
Group By Operator
- aggregations: count()
- minReductionHashAggr: 0.99
- mode: hash
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- null sort order:
- sort order:
+ File Output Operator
+ compressed: false
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator