You are viewing a plain-text version of this content; the canonical (HTML) version is available at the original mailing-list archive link.
Posted to commits@impala.apache.org by st...@apache.org on 2022/03/15 01:28:25 UTC

[impala] branch master updated: Bump up CDP_BUILD_NUMBER to 23144489

This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


The following commit(s) were added to refs/heads/master by this push:
     new ca48b94  Bump up CDP_BUILD_NUMBER to 23144489
ca48b94 is described below

commit ca48b940ec6281d492ad525418f234308a82eedf
Author: Yu-Wen Lai <yu...@cloudera.com>
AuthorDate: Mon Mar 7 12:50:04 2022 -0800

    Bump up CDP_BUILD_NUMBER to 23144489
    
    This patch is to include HIVE-25753, which is needed to improve the
    performance of retrieving the latest committed compaction for a table.
    
    Change-Id: Ifd4ae0cba48217483a40a51f97156fabfb00cf27
    Reviewed-on: http://gerrit.cloudera.org:8080/18296
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
    Reviewed-by: Aman Sinha <am...@cloudera.com>
---
 bin/impala-config.sh                               | 24 +++++++++++-----------
 .../queries/PlannerTest/joins.test                 |  8 ++++----
 .../queries/PlannerTest/resource-requirements.test | 16 +++++++--------
 3 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index caeef79..1af9b25 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -178,20 +178,20 @@ fi
 : ${IMPALA_TOOLCHAIN_HOST:=native-toolchain.s3.amazonaws.com}
 export IMPALA_TOOLCHAIN_HOST
 
-export CDP_BUILD_NUMBER=18462549
+export CDP_BUILD_NUMBER=23144489
 export CDP_MAVEN_REPOSITORY=\
 "https://${IMPALA_TOOLCHAIN_HOST}/build/cdp_components/${CDP_BUILD_NUMBER}/maven"
-export CDP_AVRO_JAVA_VERSION=1.8.2.7.2.14.0-21
-export CDP_HADOOP_VERSION=3.1.1.7.2.14.0-21
-export CDP_HBASE_VERSION=2.2.6.7.2.14.0-21
-export CDP_HIVE_VERSION=3.1.3000.7.2.14.0-21
-export CDP_ICEBERG_VERSION=0.9.1.7.2.14.0-21
-export CDP_KNOX_VERSION=1.3.0.7.2.14.0-21
-export CDP_OZONE_VERSION=1.1.0.7.2.14.0-21
-export CDP_PARQUET_VERSION=1.10.99.7.2.14.0-21
-export CDP_RANGER_VERSION=2.1.0.7.2.14.0-21
-export CDP_TEZ_VERSION=0.9.1.7.2.14.0-21
-export CDP_GCS_VERSION=2.1.2.7.2.14.0-21
+export CDP_AVRO_JAVA_VERSION=1.8.2.7.2.15.0-88
+export CDP_HADOOP_VERSION=3.1.1.7.2.15.0-88
+export CDP_HBASE_VERSION=2.4.6.7.2.15.0-88
+export CDP_HIVE_VERSION=3.1.3000.7.2.15.0-88
+export CDP_ICEBERG_VERSION=0.9.1.7.2.15.0-88
+export CDP_KNOX_VERSION=1.3.0.7.2.15.0-88
+export CDP_OZONE_VERSION=1.1.0.7.2.15.0-88
+export CDP_PARQUET_VERSION=1.10.99.7.2.15.0-88
+export CDP_RANGER_VERSION=2.1.0.7.2.15.0-88
+export CDP_TEZ_VERSION=0.9.1.7.2.15.0-88
+export CDP_GCS_VERSION=2.1.2.7.2.15.0-88
 
 # Ref: https://infra.apache.org/release-download-pages.html#closer
 : ${APACHE_MIRROR:="https://www.apache.org/dyn/closer.cgi"}
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/joins.test b/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
index c4431d0..a6f13ba 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
@@ -358,10 +358,10 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypesagg.id = functional_hbase.alltypessmall.id, functional.alltypesagg.int_col = functional_hbase.alltypessmall.int_col
 |  runtime filters: RF000 <- functional_hbase.alltypessmall.id, RF001 <- functional_hbase.alltypessmall.int_col
-|  row-size=184B cardinality=53
+|  row-size=184B cardinality=106
 |
 |--01:SCAN HBASE [functional_hbase.alltypessmall]
-|     row-size=89B cardinality=50
+|     row-size=89B cardinality=100
 |
 00:SCAN HDFS [functional.alltypesagg]
    HDFS partitions=11/11 files=11 size=814.73KB
@@ -375,12 +375,12 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypesagg.id = functional_hbase.alltypessmall.id, functional.alltypesagg.int_col = functional_hbase.alltypessmall.int_col
 |  runtime filters: RF000 <- functional_hbase.alltypessmall.id, RF001 <- functional_hbase.alltypessmall.int_col
-|  row-size=184B cardinality=53
+|  row-size=184B cardinality=106
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HBASE [functional_hbase.alltypessmall]
-|     row-size=89B cardinality=50
+|     row-size=89B cardinality=100
 |
 00:SCAN HDFS [functional.alltypesagg]
    HDFS partitions=11/11 files=11 size=814.73KB
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test b/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
index ec5008d..6437384 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
@@ -1489,14 +1489,14 @@ Codegen disabled by planner
 Analyzed query: SELECT * FROM functional_hbase.alltypessmall
 
 F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
-|  Per-Host Resources: mem-estimate=4.02MB mem-reservation=4.00MB thread-reservation=1
+|  Per-Host Resources: mem-estimate=4.03MB mem-reservation=4.00MB thread-reservation=1
 PLAN-ROOT SINK
 |  output exprs: functional_hbase.alltypessmall.id, functional_hbase.alltypessmall.bigint_col, functional_hbase.alltypessmall.bool_col, functional_hbase.alltypessmall.date_string_col, functional_hbase.alltypessmall.double_col, functional_hbase.alltypessmall.float_col, functional_hbase.alltypessmall.int_col, functional_hbase.alltypessmall.month, functional_hbase.alltypessmall.smallint_col, functional_hbase.alltypessmall.string_col, functional_hbase.alltypessmall.timestamp_col, functional_ [...]
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
 |
 01:EXCHANGE [UNPARTITIONED]
-|  mem-estimate=16.00KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=89B cardinality=50
+|  mem-estimate=30.14KB mem-reservation=0B thread-reservation=0
+|  tuple-ids=0 row-size=89B cardinality=100
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -1506,7 +1506,7 @@ Per-Host Resources: mem-estimate=4.00KB mem-reservation=0B thread-reservation=1
      table: rows=100
      columns: all
    mem-estimate=4.00KB mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=89B cardinality=50
+   tuple-ids=0 row-size=89B cardinality=100
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=4.00MB Threads=2
@@ -1515,14 +1515,14 @@ Codegen disabled by planner
 Analyzed query: SELECT * FROM functional_hbase.alltypessmall
 
 F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
-|  Per-Instance Resources: mem-estimate=4.02MB mem-reservation=4.00MB thread-reservation=1
+|  Per-Instance Resources: mem-estimate=4.03MB mem-reservation=4.00MB thread-reservation=1
 PLAN-ROOT SINK
 |  output exprs: functional_hbase.alltypessmall.id, functional_hbase.alltypessmall.bigint_col, functional_hbase.alltypessmall.bool_col, functional_hbase.alltypessmall.date_string_col, functional_hbase.alltypessmall.double_col, functional_hbase.alltypessmall.float_col, functional_hbase.alltypessmall.int_col, functional_hbase.alltypessmall.month, functional_hbase.alltypessmall.smallint_col, functional_hbase.alltypessmall.string_col, functional_hbase.alltypessmall.timestamp_col, functional_ [...]
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
 |
 01:EXCHANGE [UNPARTITIONED]
-|  mem-estimate=16.00KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=89B cardinality=50
+|  mem-estimate=30.14KB mem-reservation=0B thread-reservation=0
+|  tuple-ids=0 row-size=89B cardinality=100
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -1532,7 +1532,7 @@ Per-Instance Resources: mem-estimate=4.00KB mem-reservation=0B thread-reservatio
      table: rows=100
      columns: all
    mem-estimate=4.00KB mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=89B cardinality=50
+   tuple-ids=0 row-size=89B cardinality=100
    in pipelines: 00(GETNEXT)
 ====
 # Data source scan